[llvm] f68cad9 - [RISCV] Lower VLEFF/VLSEGFF SDNodes to MachineInstrs with VL outputs.

Yeting Kuo via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 9 22:57:17 PDT 2022


Author: Yeting Kuo
Date: 2022-06-10T13:57:10+08:00
New Revision: f68cad90870590b7b1854828f255070301873347

URL: https://github.com/llvm/llvm-project/commit/f68cad90870590b7b1854828f255070301873347
DIFF: https://github.com/llvm/llvm-project/commit/f68cad90870590b7b1854828f255070301873347.diff

LOG: [RISCV] Lower VLEFF/VLSEGFF SDNodes to MachineInstrs with VL outputs.

The patch is a replacement for D125199. Giving PseudoReadVL a vtype operand
meant computing the same vtype of a VLEFF/VLSEGFF in two different places,
RISCVISelDAGToDAG and RISCVInsertVSETVLI. A VLEFF/VLSEGFF MI with a VL
output can still provide the vtype of the VLEFF/VLSEGFF to the users of its
VL.

The patch gives the fault-only-first loads dedicated pseudos whose
MachineInstrs carry an extra VL output, and expands that output to a
PseudoReadVL instruction in the RISCVInsertVSETVLI pass.
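
For illustration, the change to the selected MIR looks roughly like this
(a sketch with hypothetical virtual register names; the old form matches
the tests removed below):

  ; Before: vl was recovered by a glued PseudoReadVL with a vtype immediate.
  %v:vr = PseudoVLE8FF_V_M1 %ptr, %avl, 3 /* e8 */, implicit-def $vl
  %outvl:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl

  ; After: the FF pseudo itself defines the vl value as a second result.
  %v:vr, %outvl:gpr = PseudoVLE8FF_V_M1 %ptr, %avl, 3 /* e8 */, implicit-def $vl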

This patch also reverts commit 4537aae0d57e17c217c192d8977012ba475b130c,
"[RISCV] Make PseudoReadVL have the vtypes of the corresponding VLEFF/VLSEGFF.".

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D126794

Added: 
    llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll

Modified: 
    llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
    llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
    llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir

Removed: 
    llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll
    llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll
    llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll
    llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll


################################################################################
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
index 9b69170d1c4a6..b3c53c9bd5266 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.cpp
@@ -182,4 +182,9 @@ void RISCVVType::printVType(unsigned VType, raw_ostream &OS) {
     OS << ", mu";
 }
 
+bool isFaultFirstLoad(const MachineInstr &MI) {
+  return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
+         !MI.isInlineAsm();
+}
+
 } // namespace llvm
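
The new helper identifies fault-only-first loads structurally rather than
by opcode. The assumption it encodes, worth spelling out since nothing in
this file names the pseudos: after this patch, FF loads are the only RVV
instructions with two explicit defs (the vector result and the trap vl)
that also clobber the VL register, e.g. (hypothetical registers):

  %v:vr, %outvl:gpr = PseudoVLE8FF_V_M1 %ptr, %avl, 3 /* e8 */, implicit-def $vl

Inline assembly can also have two defs and clobber VL, hence the
!MI.isInlineAsm() check.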

diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index b0b3dc4fe7df0..9e343a21e7bc9 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -16,6 +16,7 @@
 #include "MCTargetDesc/RISCVMCTargetDesc.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/ADT/StringSwitch.h"
+#include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/MC/MCInstrDesc.h"
 #include "llvm/MC/SubtargetFeature.h"
 #include "llvm/Support/RISCVISAInfo.h"
@@ -432,6 +433,7 @@ void printVType(unsigned VType, raw_ostream &OS);
 
 } // namespace RISCVVType
 
+bool isFaultFirstLoad(const MachineInstr &MI);
 } // namespace llvm
 
 #endif

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index c1524e9808bf0..6f9d357b86651 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -402,8 +402,7 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
   unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
   MVT VT = Node->getSimpleValueType(0);
   MVT XLenVT = Subtarget->getXLenVT();
-  unsigned SEW = VT.getScalarSizeInBits();
-  unsigned Log2SEW = Log2_32(SEW);
+  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
 
   unsigned CurOp = 2;
@@ -426,19 +425,7 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
       RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                             Log2SEW, static_cast<unsigned>(LMUL));
   MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
-                                               MVT::Other, MVT::Glue, Operands);
-  bool TailAgnostic = true;
-  bool MaskAgnostic = false;
-  if (IsMasked) {
-    uint64_t Policy = Node->getConstantOperandVal(Node->getNumOperands() - 1);
-    TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
-    MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
-  }
-  unsigned VType =
-      RISCVVType::encodeVTYPE(LMUL, SEW, TailAgnostic, MaskAgnostic);
-  SDValue VTypeOp = CurDAG->getTargetConstant(VType, DL, XLenVT);
-  SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
-                                          VTypeOp, /*Glue*/ SDValue(Load, 2));
+                                               XLenVT, MVT::Other, Operands);
 
   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
@@ -450,8 +437,8 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
                 CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
   }
 
-  ReplaceUses(SDValue(Node, NF), SDValue(ReadVL, 0));   // VL
-  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 1)); // Chain
+  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // VL
+  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain
   CurDAG->RemoveDeadNode(Node);
 }
 
@@ -1459,8 +1446,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
 
       MVT VT = Node->getSimpleValueType(0);
-      unsigned SEW = VT.getScalarSizeInBits();
-      unsigned Log2SEW = Log2_32(SEW);
+      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
 
       unsigned CurOp = 2;
      // Masked intrinsics only have TU version pseudo instructions.
@@ -1480,31 +1466,12 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       const RISCV::VLEPseudo *P =
           RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                               Log2SEW, static_cast<unsigned>(LMUL));
-      MachineSDNode *Load =
-          CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
-                                 MVT::Other, MVT::Glue, Operands);
-      bool TailAgnostic = !IsTU;
-      bool MaskAgnostic = false;
-      if (IsMasked) {
-        uint64_t Policy =
-            Node->getConstantOperandVal(Node->getNumOperands() - 1);
-        TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
-        MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
-      }
-      unsigned VType =
-          RISCVVType::encodeVTYPE(LMUL, SEW, TailAgnostic, MaskAgnostic);
-      SDValue VTypeOp = CurDAG->getTargetConstant(VType, DL, XLenVT);
-      SDNode *ReadVL =
-          CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT, VTypeOp,
-                                 /*Glue*/ SDValue(Load, 2));
-
+      MachineSDNode *Load = CurDAG->getMachineNode(
+          P->Pseudo, DL, Node->getValueType(0), XLenVT, MVT::Other, Operands);
       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
 
-      ReplaceUses(SDValue(Node, 0), SDValue(Load, 0));
-      ReplaceUses(SDValue(Node, 1), SDValue(ReadVL, 0)); // VL
-      ReplaceUses(SDValue(Node, 2), SDValue(Load, 1));   // Chain
-      CurDAG->RemoveDeadNode(Node);
+      ReplaceNode(Node, Load);
       return;
     }
     }

diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 984206baa7179..866a6d8edb395 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -7,7 +7,8 @@
 //===----------------------------------------------------------------------===//
 //
 // This file implements a function pass that inserts VSETVLI instructions where
-// needed.
+// needed and expands the vl outputs of VLEFF/VLSEGFF to PseudoReadVL
+// instructions.
 //
 // This pass consists of 3 phases:
 //
@@ -497,6 +498,7 @@ class RISCVInsertVSETVLI : public MachineFunctionPass {
   void doLocalPrepass(MachineBasicBlock &MBB);
   void doLocalPostpass(MachineBasicBlock &MBB);
   void doPRE(MachineBasicBlock &MBB);
+  void insertReadVL(MachineBasicBlock &MBB);
 };
 
 } // end anonymous namespace
@@ -1409,6 +1411,20 @@ void RISCVInsertVSETVLI::doLocalPostpass(MachineBasicBlock &MBB) {
     MI->eraseFromParent();
 }
 
+void RISCVInsertVSETVLI::insertReadVL(MachineBasicBlock &MBB) {
+  for (auto I = MBB.begin(), E = MBB.end(); I != E;) {
+    MachineInstr &MI = *I++;
+    if (isFaultFirstLoad(MI)) {
+      Register VLOutput = MI.getOperand(1).getReg();
+      if (!MRI->use_nodbg_empty(VLOutput))
+        BuildMI(MBB, I, MI.getDebugLoc(), TII->get(RISCV::PseudoReadVL),
+                VLOutput);
+      // We don't use the vl output of the VLEFF/VLSEGFF anymore.
+      MI.getOperand(1).setReg(RISCV::X0);
+    }
+  }
+}
+
 bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
   // Skip if the vector extension is not enabled.
   const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
@@ -1499,6 +1515,11 @@ bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
     }
   }
 
+  // Insert PseudoReadVL after VLEFF/VLSEGFF and replace the vl output of the
+  // VLEFF/VLSEGFF with its result.
+  for (MachineBasicBlock &MBB : MF)
+    insertReadVL(MBB);
+
   BlockInfo.clear();
   return HaveVectorOp;
 }
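
To make the expansion concrete, an illustrative sketch (register names are
hypothetical):

  ; Input to insertReadVL:
  %v:vr, %outvl:gpr = PseudoVLE8FF_V_M1 %p, %avl, 3 /* e8 */, implicit-def $vl

  ; Output when %outvl has non-debug uses:
  %v:vr, dead $x0 = PseudoVLE8FF_V_M1 %p, %avl, 3 /* e8 */, implicit-def $vl
  %outvl:gpr = PseudoReadVL implicit $vl

If %outvl is unused, only the $x0 rewrite happens, so no csrr is ever
emitted for a dead vl result.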

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 3e5948f8ff169..99d8dbe4435fa 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1367,14 +1367,13 @@ std::string RISCVInstrInfo::createMIROperandComment(
 
   uint64_t TSFlags = MI.getDesc().TSFlags;
 
-  // Print the full VType operand of vsetvli/vsetivli and PseudoReadVL
-  // instructions, and the SEW operand of vector codegen pseudos.
-  if (((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
-        MI.getOpcode() == RISCV::PseudoVSETVLI ||
-        MI.getOpcode() == RISCV::PseudoVSETIVLI ||
-        MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
-       OpIdx == 2) ||
-      (MI.getOpcode() == RISCV::PseudoReadVL && OpIdx == 1)) {
+  // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
+  // operand of vector codegen pseudos.
+  if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
+       MI.getOpcode() == RISCV::PseudoVSETVLI ||
+       MI.getOpcode() == RISCV::PseudoVSETIVLI ||
+       MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
+      OpIdx == 2) {
     unsigned Imm = MI.getOperand(OpIdx).getImm();
     RISCVVType::printVType(Imm, OS);
   } else if (RISCVII::hasSEWOp(TSFlags)) {

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 9092310d07123..0ee84f29d1594 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -643,11 +643,11 @@ class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
   let VLMul = m.value;
 }
 
-class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF, bit DummyMask = 1> :
+class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit DummyMask = 1> :
       Pseudo<(outs RetClass:$rd),
              (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
       RISCVVPseudo,
-      RISCVVLE</*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+      RISCVVLE</*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -657,11 +657,11 @@ class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF, bit DummyMask = 1> :
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoUSLoadNoMaskTU<VReg RetClass, int EEW, bit isFF> :
+class VPseudoUSLoadNoMaskTU<VReg RetClass, int EEW> :
       Pseudo<(outs RetClass:$rd),
              (ins RetClass:$dest, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
       RISCVVPseudo,
-      RISCVVLE</*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+      RISCVVLE</*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -673,13 +673,62 @@ class VPseudoUSLoadNoMaskTU<VReg RetClass, int EEW, bit isFF> :
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoUSLoadMask<VReg RetClass, int EEW, bit isFF> :
+class VPseudoUSLoadMask<VReg RetClass, int EEW> :
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
               (ins GetVRegNoV0<RetClass>.R:$merge,
                    GPR:$rs1,
                    VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
       RISCVVPseudo,
-      RISCVVLE</*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+      RISCVVLE</*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = "$rd = $merge";
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let HasVecPolicyOp = 1;
+  let UsesMaskPolicy = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSLoadFFNoMask<VReg RetClass, int EEW, bit DummyMask = 1> :
+      Pseudo<(outs RetClass:$rd, GPR:$vl),
+             (ins GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVVLE</*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = DummyMask;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSLoadFFNoMaskTU<VReg RetClass, int EEW> :
+      Pseudo<(outs RetClass:$rd, GPR:$vl),
+             (ins RetClass:$dest, GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVVLE</*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let HasMergeOp = 1;
+  let Constraints = "$rd = $dest";
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSLoadFFMask<VReg RetClass, int EEW> :
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
+              (ins GetVRegNoV0<RetClass>.R:$merge,
+                   GPR:$rs1,
+                   VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy),[]>,
+      RISCVVPseudo,
+      RISCVVLE</*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -1311,11 +1360,11 @@ class VPseudoTernaryNoMaskWithPolicy<VReg RetClass,
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoUSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
+class VPseudoUSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF>:
       Pseudo<(outs RetClass:$rd),
              (ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
       RISCVVPseudo,
-      RISCVVLSEG<NF, /*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+      RISCVVLSEG<NF, /*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -1325,11 +1374,11 @@ class VPseudoUSSegLoadNoMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoUSSegLoadNoMaskTU<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
+class VPseudoUSSegLoadNoMaskTU<VReg RetClass, int EEW, bits<4> NF>:
       Pseudo<(outs RetClass:$rd),
              (ins RetClass:$dest, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
       RISCVVPseudo,
-      RISCVVLSEG<NF, /*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+      RISCVVLSEG<NF, /*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -1341,12 +1390,60 @@ class VPseudoUSSegLoadNoMaskTU<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoUSSegLoadMask<VReg RetClass, int EEW, bits<4> NF, bit isFF>:
+class VPseudoUSSegLoadMask<VReg RetClass, int EEW, bits<4> NF>:
       Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
              (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
                   VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy),[]>,
       RISCVVPseudo,
-      RISCVVLSEG<NF, /*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/isFF, log2<EEW>.val, VLMul> {
+      RISCVVLSEG<NF, /*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/0, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let Constraints = "$rd = $merge";
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let HasVecPolicyOp = 1;
+  let UsesMaskPolicy = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSSegLoadFFNoMask<VReg RetClass, int EEW, bits<4> NF>:
+      Pseudo<(outs RetClass:$rd, GPR:$vl),
+             (ins GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVVLSEG<NF, /*Masked*/0, /*TU*/0, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSSegLoadFFNoMaskTU<VReg RetClass, int EEW, bits<4> NF>:
+      Pseudo<(outs RetClass:$rd, GPR:$vl),
+             (ins RetClass:$dest, GPR:$rs1, AVL:$avl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVVLSEG<NF, /*Masked*/0, /*TU*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let HasMergeOp = 1;
+  let Constraints = "$rd = $dest";
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSSegLoadFFMask<VReg RetClass, int EEW, bits<4> NF>:
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd, GPR:$vl),
+             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
+                  VMaskOp:$vm, AVL:$avl, ixlenimm:$sew, ixlenimm:$policy),[]>,
+      RISCVVPseudo,
+      RISCVVLSEG<NF, /*Masked*/1, /*TU*/1, /*Strided*/0, /*FF*/1, log2<EEW>.val, VLMul> {
   let mayLoad = 1;
   let mayStore = 0;
   let hasSideEffects = 0;
@@ -1563,13 +1660,13 @@ multiclass VPseudoUSLoad {
       defvar vreg = lmul.vrclass;
       let VLMul = lmul.value in {
         def "E" # eew # "_V_" # LInfo :
-          VPseudoUSLoadNoMask<vreg, eew, false>,
+          VPseudoUSLoadNoMask<vreg, eew>,
           VLESched<eew>;
         def "E" # eew # "_V_" # LInfo # "_TU":
-          VPseudoUSLoadNoMaskTU<vreg, eew, false>,
+          VPseudoUSLoadNoMaskTU<vreg, eew>,
           VLESched<eew>;
         def "E" # eew # "_V_" # LInfo # "_MASK" :
-          VPseudoUSLoadMask<vreg, eew, false>,
+          VPseudoUSLoadMask<vreg, eew>,
           VLESched<eew>;
       }
     }
@@ -1582,14 +1679,14 @@ multiclass VPseudoFFLoad {
       defvar LInfo = lmul.MX;
       defvar vreg = lmul.vrclass;
       let VLMul = lmul.value in {
-        def "E" # eew # "FF_V_" # LInfo :
-          VPseudoUSLoadNoMask<vreg, eew, true>,
+        def "E" # eew # "FF_V_" # LInfo:
+          VPseudoUSLoadFFNoMask<vreg, eew>,
           VLFSched<eew>;
         def "E" # eew # "FF_V_" # LInfo # "_TU":
-          VPseudoUSLoadNoMaskTU<vreg, eew, true>,
+          VPseudoUSLoadFFNoMaskTU<vreg, eew>,
           VLFSched<eew>;
-        def "E" # eew # "FF_V_" # LInfo # "_MASK" :
-          VPseudoUSLoadMask<vreg, eew, true>,
+        def "E" # eew # "FF_V_" # LInfo # "_MASK":
+          VPseudoUSLoadFFMask<vreg, eew>,
           VLFSched<eew>;
       }
     }
@@ -1599,8 +1696,7 @@ multiclass VPseudoFFLoad {
 multiclass VPseudoLoadMask {
   foreach mti = AllMasks in {
     let VLMul = mti.LMul.value in {
-      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*isFF*/0,
-                                               /*DummyMask*/0>;
+      def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*DummyMask*/0>;
     }
   }
 }
@@ -2795,20 +2891,38 @@ multiclass VPseudoVNCVTD_W {
               Sched<[WriteVFNCvtFToFV, ReadVFNCvtFToFV, ReadVMask]>;
 }
 
-multiclass VPseudoUSSegLoad<bit isFF> {
+multiclass VPseudoUSSegLoad {
   foreach eew = EEWList in {
     foreach lmul = MxSet<eew>.m in {
       defvar LInfo = lmul.MX;
       let VLMul = lmul.value in {
         foreach nf = NFSet<lmul>.L in {
           defvar vreg = SegRegClass<lmul, nf>.RC;
-          defvar FFStr = !if(isFF, "FF", "");
-          def nf # "E" # eew # FFStr # "_V_" # LInfo :
-            VPseudoUSSegLoadNoMask<vreg, eew, nf, isFF>;
-          def nf # "E" # eew # FFStr # "_V_" # LInfo # "_TU" :
-            VPseudoUSSegLoadNoMaskTU<vreg, eew, nf, isFF>;
-          def nf # "E" # eew # FFStr # "_V_" # LInfo # "_MASK" :
-            VPseudoUSSegLoadMask<vreg, eew, nf, isFF>;
+          def nf # "E" # eew # "_V_" # LInfo :
+            VPseudoUSSegLoadNoMask<vreg, eew, nf>;
+          def nf # "E" # eew # "_V_" # LInfo # "_TU" :
+            VPseudoUSSegLoadNoMaskTU<vreg, eew, nf>;
+          def nf # "E" # eew # "_V_" # LInfo # "_MASK" :
+            VPseudoUSSegLoadMask<vreg, eew, nf>;
+        }
+      }
+    }
+  }
+}
+
+multiclass VPseudoUSSegLoadFF {
+  foreach eew = EEWList in {
+    foreach lmul = MxSet<eew>.m in {
+      defvar LInfo = lmul.MX;
+      let VLMul = lmul.value in {
+        foreach nf = NFSet<lmul>.L in {
+          defvar vreg = SegRegClass<lmul, nf>.RC;
+          def nf # "E" # eew # "FF_V_" # LInfo :
+            VPseudoUSSegLoadFFNoMask<vreg, eew, nf>;
+          def nf # "E" # eew # "FF_V_" # LInfo # "_TU" :
+            VPseudoUSSegLoadFFNoMaskTU<vreg, eew, nf>;
+          def nf # "E" # eew # "FF_V_" # LInfo # "_MASK" :
+            VPseudoUSSegLoadFFMask<vreg, eew, nf>;
         }
       }
     }
@@ -4300,7 +4414,7 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1,
     Uses = [VL] in
-def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins ixlenimm:$vtype), []>;
+def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>;
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in {
   def PseudoVSPILL_M1 : VPseudo<VS1R_V, V_M1, (outs), (ins VR:$rs1, GPR:$rs2)>;
@@ -4393,7 +4507,7 @@ defm PseudoVL : VPseudoFFLoad;
 //===----------------------------------------------------------------------===//
 // 7.8. Vector Load/Store Segment Instructions
 //===----------------------------------------------------------------------===//
-defm PseudoVLSEG : VPseudoUSSegLoad</*isFF=*/false>;
+defm PseudoVLSEG : VPseudoUSSegLoad;
 defm PseudoVLSSEG : VPseudoSSegLoad;
 defm PseudoVLOXSEG : VPseudoISegLoad</*Ordered=*/true>;
 defm PseudoVLUXSEG : VPseudoISegLoad</*Ordered=*/false>;
@@ -4403,8 +4517,9 @@ defm PseudoVSOXSEG : VPseudoISegStore</*Ordered=*/true>;
 defm PseudoVSUXSEG : VPseudoISegStore</*Ordered=*/false>;
 
 // vlseg<nf>e<eew>ff.v may update VL register
-let hasSideEffects = 1, Defs = [VL] in
-defm PseudoVLSEG : VPseudoUSSegLoad</*isFF=*/true>;
+let hasSideEffects = 1, Defs = [VL] in {
+defm PseudoVLSEG : VPseudoUSSegLoadFF;
+}
 
 //===----------------------------------------------------------------------===//
 // 12. Vector Integer Arithmetic Instructions
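
The segment FF pseudos gain the same extra GPR def, with the destination in
a register-tuple class, e.g. (illustrative):

  %seg:vrn2m1, %outvl:gpr = PseudoVLSEG2E8FF_V_M1 %ptr, %avl, 3 /* e8 */, implicit-def $vl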

diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
index 9209744696fb7..e341f82db95da 100644
--- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -158,12 +158,16 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
   if (RISCVII::hasSEWOp(TSFlags))
     --NumOps;
 
+  bool hasVLOutput = isFaultFirstLoad(*MI);
   for (unsigned OpNo = 0; OpNo != NumOps; ++OpNo) {
     const MachineOperand &MO = MI->getOperand(OpNo);
+    // Skip the vl output. It should be the second output.
+    if (hasVLOutput && OpNo == 1)
+      continue;
 
     // Skip merge op. It should be the first operand after the result.
-    if (RISCVII::hasMergeOp(TSFlags) && OpNo == 1) {
-      assert(MI->getNumExplicitDefs() == 1);
+    if (RISCVII::hasMergeOp(TSFlags) && OpNo == 1U + hasVLOutput) {
+      assert(MI->getNumExplicitDefs() == 1U + hasVLOutput);
       continue;
     }
 
@@ -210,16 +214,6 @@ bool llvm::lowerRISCVMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
   if (lowerRISCVVMachineInstrToMCInst(MI, OutMI))
     return false;
 
-  // Only need the output operand when lower PseudoReadVL from MI to MCInst.
-  if (MI->getOpcode() == RISCV::PseudoReadVL) {
-    OutMI.setOpcode(RISCV::CSRRS);
-    OutMI.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
-    OutMI.addOperand(
-        MCOperand::createImm(RISCVSysReg::lookupSysRegByName("VL")->Encoding));
-    OutMI.addOperand(MCOperand::createReg(RISCV::X0));
-    return false;
-  }
-
   OutMI.setOpcode(MI->getOpcode());
 
   for (const MachineOperand &MO : MI->operands()) {
@@ -248,6 +242,12 @@ bool llvm::lowerRISCVMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
         RISCVSysReg::lookupSysRegByName("VLENB")->Encoding));
     OutMI.addOperand(MCOperand::createReg(RISCV::X0));
     break;
+  case RISCV::PseudoReadVL:
+    OutMI.setOpcode(RISCV::CSRRS);
+    OutMI.addOperand(
+        MCOperand::createImm(RISCVSysReg::lookupSysRegByName("VL")->Encoding));
+    OutMI.addOperand(MCOperand::createReg(RISCV::X0));
+    break;
   }
   return false;
 }
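
End to end, the two changes in this file move PseudoReadVL lowering into
the common switch and skip the now-dead GPR vl def of the FF pseudo, so
after expansion and register allocation (hypothetical registers)

  $v8, dead $x0 = PseudoVLE8FF_V_M1 $x10, $x11, 3 /* e8 */, implicit-def $vl
  $x12 = PseudoReadVL implicit $vl

lowers to "vle8ff.v v8, (a0)" followed by "csrr a2, vl" (CSRRS with an x0
source register).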

diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll
deleted file mode 100644
index 7bb62cd1e9781..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll
+++ /dev/null
@@ -1,1891 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \
-; RUN:   -target-abi=ilp32 | FileCheck %s
-declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32);
-declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32);
-declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>*, i32);
-declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>*, i32);
-declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32);
-declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32);
-declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>*, i32);
-declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>*, i32);
-declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i32);
-declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32);
-declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, i32);
-declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>*, i32);
-declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>*, i32);
-declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, i32);
-declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>*, i32);
-declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>*, i32);
-declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i32, i32 immarg)
-declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8>, <vscale x 16 x i8>*, <vscale x 16 x i1>, i32, i32 immarg)
-declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8>, <vscale x 32 x i8>*, <vscale x 32 x i1>, i32, i32 immarg)
-declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8>, <vscale x 64 x i8>*, <vscale x 64 x i1>, i32, i32 immarg)
-declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16>, <vscale x 4 x i16>*, <vscale x 4 x i1>, i32, i32 immarg)
-declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16>, <vscale x 8 x i16>*, <vscale x 8 x i1>, i32, i32 immarg)
-declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16>, <vscale x 16 x i16>*, <vscale x 16 x i1>, i32, i32 immarg)
-declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16>, <vscale x 32 x i16>*, <vscale x 32 x i1>, i32, i32 immarg)
-declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i1>, i32, i32 immarg)
-declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, <vscale x 4 x i1>, i32, i32 immarg)
-declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, <vscale x 8 x i1>, i32, i32 immarg)
-declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>*, <vscale x 16 x i1>, i32, i32 immarg)
-declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64>, <vscale x 1 x i64>*, <vscale x 1 x i1>, i32, i32 immarg)
-declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64>, <vscale x 2 x i64>*, <vscale x 2 x i1>, i32, i32 immarg)
-declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64>, <vscale x 4 x i64>*, <vscale x 4 x i1>, i32, i32 immarg)
-declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64>, <vscale x 8 x i64>*, <vscale x 8 x i1>, i32, i32 immarg)
-
-define i32 @vleffe8m1(<vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr = PseudoVLE8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2(<vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4(<vscale x 32 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8(<vscale x 64 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8> undef, <vscale x 64 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m1_tu(<vscale x 8 x i8> %merge, <vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE8FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %merge, <vscale x 8 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2_tu(<vscale x 16 x i8> %merge, <vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8> %merge, <vscale x 16 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4_tu(<vscale x 32 x i8> %merge, <vscale x 32 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8> %merge, <vscale x 32 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tu(<vscale x 64 x i8> %merge, <vscale x 64 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8> %merge, <vscale x 64 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m1_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2_tumu(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4_tumu(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tumu(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m1_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2_tamu(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4_tamu(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tamu(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m1_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2_tuma(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4_tuma(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tuma(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 131 /* e8, m8, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m1_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2_tama(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4_tama(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tama(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 195 /* e8, m8, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1(<vscale x 4 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_:%[0-9]+]]:vr = PseudoVLE16FF_V_M1 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2(<vscale x 8 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4(<vscale x 16 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m8(<vscale x 32 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16> undef, <vscale x 32 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1_tu(<vscale x 4 x i16> %merge, <vscale x 4 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE16FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16> %merge, <vscale x 4 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2_tu(<vscale x 8 x i16> %merge, <vscale x 8 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16> %merge, <vscale x 8 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4_tu(<vscale x 16 x i16> %merge, <vscale x 16 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16> %merge, <vscale x 16 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m8_tu(<vscale x 32 x i16> %merge, <vscale x 32 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m8_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16> %merge, <vscale x 32 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1_tumu(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4_tumu(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m8_tumu(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m8_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1_tamu(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4_tamu(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m8_tamu(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m8_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1_tuma(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4_tuma(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m8_tuma(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m8_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 139 /* e16, m8, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4_tama(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m8_tama(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m8_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 203 /* e16, m8, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1(<vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_:%[0-9]+]]:vr = PseudoVLE32FF_V_M1 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m2(<vscale x 4 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m2
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m4(<vscale x 8 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m4
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m8(<vscale x 16 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1_tu(<vscale x 2 x i32> %merge, <vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE32FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> %merge, <vscale x 2 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m2_tu(<vscale x 4 x i32> %merge, <vscale x 4 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m2_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32> %merge, <vscale x 4 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m4_tu(<vscale x 8 x i32> %merge, <vscale x 8 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m4_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32> %merge, <vscale x 8 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m8_tu(<vscale x 16 x i32> %merge, <vscale x 16 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m8_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32> %merge, <vscale x 16 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1_tumu(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m2_tumu(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m2_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m4_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m4_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m8_tumu(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m8_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1_tamu(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m2_tamu(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m2_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m4_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m4_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m8_tamu(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m8_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1_tuma(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m2_tuma(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m2_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m4_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m4_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 146 /* e32, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m8_tuma(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m8_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 147 /* e32, m8, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1_tama(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m2_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m2_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m4_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m4_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 210 /* e32, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m8_tama(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m8_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 211 /* e32, m8, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m1(<vscale x 1 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m1
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_:%[0-9]+]]:vr = PseudoVLE64FF_V_M1 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m2(<vscale x 2 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m2
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4(<vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8(<vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m1_tu(<vscale x 1 x i64> %merge, <vscale x 1 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m1_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE64FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64> %merge, <vscale x 1 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m2_tu(<vscale x 2 x i64> %merge, <vscale x 2 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m2_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64> %merge, <vscale x 2 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4_tu(<vscale x 4 x i64> %merge, <vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64> %merge, <vscale x 4 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8_tu(<vscale x 8 x i64> %merge, <vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64> %merge, <vscale x 8 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m1_tumu(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m1_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m2_tumu(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m2_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4_tumu(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m1_tamu(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m1_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m2_tamu(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m2_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4_tamu(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m1_tuma(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m1_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m2_tuma(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m2_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4_tuma(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 155 /* e64, m8, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m1_tama(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m1_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m2_tama(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m2_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 219 /* e64, m8, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}

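For reference, the PseudoReadVL immediates in the checks above (0, 24, 64, 88, 152, 216, ...) are plain vtype encodings, and the 0-3 policy operand on the masked pseudos encodes tail agnostic in bit 0 and mask agnostic in bit 1, which is why the _tama tests pair policy 3 with vtype 216. Below is a minimal standalone sketch of the decomposition, assuming the vtype bit layout from the RVV spec (vlmul in bits [2:0], vsew in bits [5:3], vta in bit 6, vma in bit 7); the in-tree helper RISCVVType::encodeVTYPE in RISCVBaseInfo.h should compute the same values.

#include <cassert>

// Sketch only; not part of this commit. Non-fractional LMULs as encoded
// in vtype[2:0] for the cases exercised by these tests.
enum VLMul { M1 = 0, M2 = 1, M4 = 2, M8 = 3 };

// vtype[5:3] holds log2(SEW) - 3, so e8 -> 0, e16 -> 1, e32 -> 2, e64 -> 3.
static unsigned encodeVType(VLMul LMul, unsigned SEW, bool TailAgnostic,
                            bool MaskAgnostic) {
  unsigned VSEW = 0;
  for (unsigned S = SEW; S > 8; S >>= 1)
    ++VSEW;
  return (unsigned(MaskAgnostic) << 7) | (unsigned(TailAgnostic) << 6) |
         (VSEW << 3) | unsigned(LMul);
}

int main() {
  // These match the /* ... */ comments printed next to PseudoReadVL above.
  assert(encodeVType(M1, 64, /*ta=*/false, /*ma=*/false) == 24);  // e64, m1, tu, mu
  assert(encodeVType(M1, 64, /*ta=*/true,  /*ma=*/false) == 88);  // e64, m1, ta, mu
  assert(encodeVType(M1, 64, /*ta=*/false, /*ma=*/true) == 152);  // e64, m1, tu, ma
  assert(encodeVType(M1, 64, /*ta=*/true,  /*ma=*/true) == 216);  // e64, m1, ta, ma
  assert(encodeVType(M1, 8,  /*ta=*/true,  /*ma=*/false) == 64);  // e8, m1, ta, mu
  return 0;
}
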
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll
deleted file mode 100644
index 7bb62cd1e9781..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll
+++ /dev/null
@@ -1,1891 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \
-; RUN:   -target-abi=ilp32 | FileCheck %s
-declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32);
-declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32);
-declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>*, i32);
-declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>*, i32);
-declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32);
-declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32);
-declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>*, i32);
-declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>*, i32);
-declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i32);
-declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32);
-declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, i32);
-declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>*, i32);
-declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>*, i32);
-declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, i32);
-declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>*, i32);
-declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>*, i32);
-declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i32, i32 immarg)
-declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8>, <vscale x 16 x i8>*, <vscale x 16 x i1>, i32, i32 immarg)
-declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8>, <vscale x 32 x i8>*, <vscale x 32 x i1>, i32, i32 immarg)
-declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8>, <vscale x 64 x i8>*, <vscale x 64 x i1>, i32, i32 immarg)
-declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16>, <vscale x 4 x i16>*, <vscale x 4 x i1>, i32, i32 immarg)
-declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16>, <vscale x 8 x i16>*, <vscale x 8 x i1>, i32, i32 immarg)
-declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16>, <vscale x 16 x i16>*, <vscale x 16 x i1>, i32, i32 immarg)
-declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16>, <vscale x 32 x i16>*, <vscale x 32 x i1>, i32, i32 immarg)
-declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i1>, i32, i32 immarg)
-declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, <vscale x 4 x i1>, i32, i32 immarg)
-declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, <vscale x 8 x i1>, i32, i32 immarg)
-declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>*, <vscale x 16 x i1>, i32, i32 immarg)
-declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64>, <vscale x 1 x i64>*, <vscale x 1 x i1>, i32, i32 immarg)
-declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64>, <vscale x 2 x i64>*, <vscale x 2 x i1>, i32, i32 immarg)
-declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64>, <vscale x 4 x i64>*, <vscale x 4 x i1>, i32, i32 immarg)
-declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64>, <vscale x 8 x i64>*, <vscale x 8 x i1>, i32, i32 immarg)
-
-define i32 @vleffe8m1(<vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr = PseudoVLE8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2(<vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4(<vscale x 32 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8(<vscale x 64 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8> undef, <vscale x 64 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m1_tu(<vscale x 8 x i8> %merge, <vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE8FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %merge, <vscale x 8 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2_tu(<vscale x 16 x i8> %merge, <vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8> %merge, <vscale x 16 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4_tu(<vscale x 32 x i8> %merge, <vscale x 32 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8> %merge, <vscale x 32 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tu(<vscale x 64 x i8> %merge, <vscale x 64 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8> %merge, <vscale x 64 x i8>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m1_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2_tumu(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4_tumu(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tumu(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m1_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2_tamu(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4_tamu(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tamu(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m1_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2_tuma(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4_tuma(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tuma(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 131 /* e8, m8, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m1_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m1_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m2_tama(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m2_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m4_tama(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m4_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe8m8_tama(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe8m8_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 195 /* e8, m8, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1(<vscale x 4 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_:%[0-9]+]]:vr = PseudoVLE16FF_V_M1 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2(<vscale x 8 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4(<vscale x 16 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m8(<vscale x 32 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16> undef, <vscale x 32 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1_tu(<vscale x 4 x i16> %merge, <vscale x 4 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE16FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16> %merge, <vscale x 4 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2_tu(<vscale x 8 x i16> %merge, <vscale x 8 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16> %merge, <vscale x 8 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4_tu(<vscale x 16 x i16> %merge, <vscale x 16 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16> %merge, <vscale x 16 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m8_tu(<vscale x 32 x i16> %merge, <vscale x 32 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m8_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16> %merge, <vscale x 32 x i16>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1_tumu(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4_tumu(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m8_tumu(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m8_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1_tamu(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4_tamu(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m8_tamu(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m8_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1_tuma(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4_tuma(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m8_tuma(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m8_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 139 /* e16, m8, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m1_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m1_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m2_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m2_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m4_tama(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m4_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe16m8_tama(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe16m8_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 203 /* e16, m8, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1(<vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_:%[0-9]+]]:vr = PseudoVLE32FF_V_M1 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m2(<vscale x 4 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m2
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m4(<vscale x 8 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m4
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m8(<vscale x 16 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1_tu(<vscale x 2 x i32> %merge, <vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE32FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> %merge, <vscale x 2 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m2_tu(<vscale x 4 x i32> %merge, <vscale x 4 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m2_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32> %merge, <vscale x 4 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m4_tu(<vscale x 8 x i32> %merge, <vscale x 8 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m4_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32> %merge, <vscale x 8 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m8_tu(<vscale x 16 x i32> %merge, <vscale x 16 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m8_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32> %merge, <vscale x 16 x i32>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1_tumu(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m2_tumu(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m2_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m4_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m4_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m8_tumu(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m8_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1_tamu(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m2_tamu(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m2_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m4_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m4_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m8_tamu(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m8_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1_tuma(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m2_tuma(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m2_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m4_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m4_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 146 /* e32, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m8_tuma(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m8_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 147 /* e32, m8, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m1_tama(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m1_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m2_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m2_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m4_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m4_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 210 /* e32, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe32m8_tama(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe32m8_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 211 /* e32, m8, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m1(<vscale x 1 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m1
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_:%[0-9]+]]:vr = PseudoVLE64FF_V_M1 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m2(<vscale x 2 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m2
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4(<vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8(<vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m1_tu(<vscale x 1 x i64> %merge, <vscale x 1 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m1_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE64FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64> %merge, <vscale x 1 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m2_tu(<vscale x 2 x i64> %merge, <vscale x 2 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m2_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64> %merge, <vscale x 2 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4_tu(<vscale x 4 x i64> %merge, <vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64> %merge, <vscale x 4 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8_tu(<vscale x 8 x i64> %merge, <vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8_tu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64> %merge, <vscale x 8 x i64>* %p, i32 %vl)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m1_tumu(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m1_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m2_tumu(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m2_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4_tumu(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8_tumu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m1_tamu(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m1_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m2_tamu(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m2_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4_tamu(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8_tamu
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m1_tuma(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m1_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m2_tuma(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m2_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4_tuma(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8_tuma
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 155 /* e64, m8, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m1_tama(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m1_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m2_tama(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m2_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m4_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m4_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
-  ret i32 %1
-}
-
-define i32 @vleffe64m8_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
-  ; CHECK-LABEL: name: vleffe64m8_tama
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 219 /* e64, m8, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
-  ; CHECK-NEXT:   PseudoRET implicit $x10
-entry:
-  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
-  ret i32 %1
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
new file mode 100644
index 0000000000000..1cb7fc0e6c31e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-vlseg2ff-output.ll
@@ -0,0 +1,114 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s
+
+declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i64)
+declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i64, i64 immarg)
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, i8* , i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64, i64)
+
+define i64 @test_vleff_nxv8i8(<vscale x 8 x i8> *%p, i64 %vl) {
+  ; CHECK-LABEL: name: test_vleff_nxv8i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8>* %p, i64 %vl)
+  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
+  ret i64 %1
+}
+
+define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %merge, <vscale x 8 x i8> *%p, i64 %vl) {
+  ; CHECK-LABEL: name: test_vleff_nxv8i8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_TU:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_TU1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_TU1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %merge, <vscale x 8 x i8>* %p, i64 %vl)
+  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
+  ret i64 %1
+}
+
+define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, <vscale x 8 x i1> %m, i64 %vl) {
+  ; CHECK-LABEL: name: test_vleff_nxv8i8_mask
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_MASK1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, <vscale x 8 x i1> %m, i64 %vl, i64 0)
+  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
+  ret i64 %1
+}
+
+define i64 @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, i8* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
+  ret i64 %1
+}
+
+define i64 @test_vlseg2ff_nxv8i8_tu(<vscale x 8 x i8> %val, i8* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1 = REG_SEQUENCE [[COPY2]], %subreg.sub_vrm1_0, [[COPY2]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_TU:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_TU1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_TU [[REG_SEQUENCE]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_TU1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
+  ret i64 %1
+}
+
+define i64 @test_vlseg2ff_nxv8i8_mask(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_mask
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY3]], %subreg.sub_vrm1_0, [[COPY3]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def dead $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_MASK1]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
+  ret i64 %1
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll
deleted file mode 100644
index 2fed18f47b6c1..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll
+++ /dev/null
@@ -1,732 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \
-; RUN:   -target-abi=ilp32d | FileCheck %s
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16* , i32)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i32, i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32* , i32)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i32, i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8* , i32)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i32, i32)
-declare {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64* , i32)
-declare {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i32, i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32* , i32)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32, i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16* , i32)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i32, i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8* , i32)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32, i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16* , i32)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32, i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32* , i32)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32, i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8* , i32)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32, i32)
-declare {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64* , i32)
-declare {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i1>, i32, i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16* , i32)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32, i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8* , i32)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32, i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8* , i32)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32, i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32* , i32)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i1>, i32, i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8* , i32)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i1>, i32, i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16* , i32)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32, i32)
-declare {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64* , i32)
-declare {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i1>, i32, i32)
-
-define void @test_vlseg2ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, i8* %base, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl, <vscale x 8 x i1> %mask, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %2, 2
-  store volatile i32 %3, i32* %outvl
-  %4 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
-  %5 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %4, 2
-  store volatile i32 %5, i32* %outvl
-  %6 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %7 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %6, 2
-  store volatile i32 %7, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv16i8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E8FF_V_M2 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, i8* %base, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %vl, <vscale x 16 x i1> %mask, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SW [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 2
-  store volatile i32 %3, i32* %outvl
-  %4 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
-  %5 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 2
-  store volatile i32 %5, i32* %outvl
-  %6 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
-  %7 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 2
-  store volatile i32 %7, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv32i8(i8* %base, i32 %vl, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv32i8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E8FF_V_M4 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, i8* %base, i32 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i32 %vl, <vscale x 32 x i1> %mask, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv32i8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %2, 2
-  store volatile i32 %3, i32* %outvl
-  %4 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i32 %vl, i32 2)
-  %5 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %4, 2
-  store volatile i32 %5, i32* %outvl
-  %6 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
-  %7 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %6, 2
-  store volatile i32 %7, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv4i16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E16FF_V_M1 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, i16* %base, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl, <vscale x 4 x i1> %mask, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %2, 2
-  store volatile i32 %3, i32* %outvl
-  %4 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
-  %5 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %4, 2
-  store volatile i32 %5, i32* %outvl
-  %6 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
-  %7 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %6, 2
-  store volatile i32 %7, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E16FF_V_M2 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, i16* %base, i32 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %vl, <vscale x 8 x i1> %mask, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %2, 2
-  store volatile i32 %3, i32* %outvl
-  %4 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
-  %5 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %4, 2
-  store volatile i32 %5, i32* %outvl
-  %6 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
-  %7 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %6, 2
-  store volatile i32 %7, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv16i16(i16* %base, i32 %vl, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv16i16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E16FF_V_M4 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, i16* %base, i32 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i32 %vl, <vscale x 16 x i1> %mask, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %2, 2
-  store volatile i32 %3, i32* %outvl
-  %4 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
-  %5 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %4, 2
-  store volatile i32 %5, i32* %outvl
-  %6 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
-  %7 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %6, 2
-  store volatile i32 %7, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv2i32
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E32FF_V_M1 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, i32* %base, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl, <vscale x 2 x i1> %mask, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i32
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %2, 2
-  store volatile i32 %3, i32* %outvl
-  %4 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 2)
-  %5 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %4, 2
-  store volatile i32 %5, i32* %outvl
-  %6 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
-  %7 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %6, 2
-  store volatile i32 %7, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv4i32
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E32FF_V_M2 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, i32* %base, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %vl, <vscale x 4 x i1> %mask, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i32
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %2, 2
-  store volatile i32 %3, i32* %outvl
-  %4 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
-  %5 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %4, 2
-  store volatile i32 %5, i32* %outvl
-  %6 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
-  %7 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %6, 2
-  store volatile i32 %7, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv1i64(i64* %base, i32 %vl, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv1i64
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E64FF_V_M1 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, i64* %base, i32 %vl)
-  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i32 %vl, <vscale x 1 x i1> %mask, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv1i64
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} %2, 2
-  store volatile i32 %3, i32* %outvl
-  %4 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 2)
-  %5 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} %4, 2
-  store volatile i32 %5, i32* %outvl
-  %6 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
-  %7 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} %6, 2
-  store volatile i32 %7, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv2i64(i64* %base, i32 %vl, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv2i64
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E64FF_V_M2 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, i64* %base, i32 %vl)
-  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i32 %vl, <vscale x 2 x i1> %mask, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i64
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} %2, 2
-  store volatile i32 %3, i32* %outvl
-  %4 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 2)
-  %5 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} %4, 2
-  store volatile i32 %5, i32* %outvl
-  %6 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
-  %7 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} %6, 2
-  store volatile i32 %7, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv4i64(i64* %base, i32 %vl, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv4i64
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E64FF_V_M4 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, i64* %base, i32 %vl)
-  %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv4i64(<vscale x 4 x i64> %val, i64* %base, i32 %vl, <vscale x 4 x i1> %mask, i32* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i64
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
-  %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} %0, 2
-  store volatile i32 %1, i32* %outvl
-  %2 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
-  %3 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} %2, 2
-  store volatile i32 %3, i32* %outvl
-  %4 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
-  %5 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} %4, 2
-  store volatile i32 %5, i32* %outvl
-  %6 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
-  %7 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} %6, 2
-  store volatile i32 %7, i32* %outvl
-  ret void
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll
deleted file mode 100644
index 49ba44ed827dd..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll
+++ /dev/null
@@ -1,732 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel < %s \
-; RUN:   -target-abi=lp64d | FileCheck %s
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16* , i64)
-declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i64, i64)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32* , i64)
-declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i64, i64)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8* , i64)
-declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i64, i64)
-declare {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64* , i64)
-declare {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64, i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg2ff.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32* , i64)
-declare {<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64, i64)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16* , i64)
-declare {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i64, i64)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg2ff.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8* , i64)
-declare {<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64, i64)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg2ff.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16* , i64)
-declare {<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64, i64)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32* , i64)
-declare {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64, i64)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8* , i64)
-declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64, i64)
-declare {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64* , i64)
-declare {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i1>, i64, i64)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16* , i64)
-declare {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i64, i64)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg2ff.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8* , i64)
-declare {<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i64, i64)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg2ff.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8* , i64)
-declare {<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i64, i64)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>, i64} @llvm.riscv.vlseg2ff.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32* , i64)
-declare {<vscale x 8 x i32>,<vscale x 8 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i1>, i64, i64)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8* , i64)
-declare {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i1>, i64, i64)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg2ff.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16* , i64)
-declare {<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i64, i64)
-declare {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64* , i64)
-declare {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i1>, i64, i64)
-
-define void @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, i8* %base, i64 %vl)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %vl, <vscale x 8 x i1> %mask, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 0)
-  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %2, 2
-  store volatile i64 %3, i64* %outvl
-  %4 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 2)
-  %5 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %4, 2
-  store volatile i64 %5, i64* %outvl
-  %6 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
-  %7 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %6, 2
-  store volatile i64 %7, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv16i8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E8FF_V_M2 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, i8* %base, i64 %vl)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %vl, <vscale x 16 x i1> %mask, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 0)
-  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} %2, 2
-  store volatile i64 %3, i64* %outvl
-  %4 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 2)
-  %5 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} %4, 2
-  store volatile i64 %5, i64* %outvl
-  %6 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 3)
-  %7 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} %6, 2
-  store volatile i64 %7, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv32i8(i8* %base, i64 %vl, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv32i8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E8FF_V_M4 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, i8* %base, i64 %vl)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i64 %vl, <vscale x 32 x i1> %mask, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv32i8
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i64 %vl, i64 0)
-  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} %2, 2
-  store volatile i64 %3, i64* %outvl
-  %4 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i64 %vl, i64 2)
-  %5 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} %4, 2
-  store volatile i64 %5, i64* %outvl
-  %6 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i64 %vl, i64 3)
-  %7 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} %6, 2
-  store volatile i64 %7, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv4i16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E16FF_V_M1 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, i16* %base, i64 %vl)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %vl, <vscale x 4 x i1> %mask, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 0)
-  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} %2, 2
-  store volatile i64 %3, i64* %outvl
-  %4 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 2)
-  %5 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} %4, 2
-  store volatile i64 %5, i64* %outvl
-  %6 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
-  %7 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} %6, 2
-  store volatile i64 %7, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E16FF_V_M2 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, i16* %base, i64 %vl)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %vl, <vscale x 8 x i1> %mask, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 0)
-  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} %2, 2
-  store volatile i64 %3, i64* %outvl
-  %4 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 2)
-  %5 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} %4, 2
-  store volatile i64 %5, i64* %outvl
-  %6 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
-  %7 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} %6, 2
-  store volatile i64 %7, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv16i16(i16* %base, i64 %vl, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv16i16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E16FF_V_M4 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, i16* %base, i64 %vl)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %vl, <vscale x 16 x i1> %mask, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i16
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 0)
-  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %2, 2
-  store volatile i64 %3, i64* %outvl
-  %4 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 2)
-  %5 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %4, 2
-  store volatile i64 %5, i64* %outvl
-  %6 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 3)
-  %7 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %6, 2
-  store volatile i64 %7, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv2i32
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E32FF_V_M1 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, i32* %base, i64 %vl)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl, <vscale x 2 x i1> %mask, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i32
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 0)
-  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} %2, 2
-  store volatile i64 %3, i64* %outvl
-  %4 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 2)
-  %5 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} %4, 2
-  store volatile i64 %5, i64* %outvl
-  %6 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
-  %7 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} %6, 2
-  store volatile i64 %7, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv4i32
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E32FF_V_M2 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, i32* %base, i64 %vl)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %vl, <vscale x 4 x i1> %mask, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i32
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SD [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 0)
-  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} %2, 2
-  store volatile i64 %1, i64* %outvl
-  %4 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 2)
-  %5 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} %4, 2
-  store volatile i64 %1, i64* %outvl
-  %6 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
-  %7 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} %6, 2
-  store volatile i64 %1, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv1i64
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E64FF_V_M1 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, i64* %base, i64 %vl)
-  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl, <vscale x 1 x i1> %mask, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv1i64
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 0)
-  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} %2, 2
-  store volatile i64 %3, i64* %outvl
-  %4 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 2)
-  %5 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} %4, 2
-  store volatile i64 %5, i64* %outvl
-  %6 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
-  %7 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} %6, 2
-  store volatile i64 %7, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv2i64
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E64FF_V_M2 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, i64* %base, i64 %vl)
-  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %vl, <vscale x 2 x i1> %mask, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i64
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 0)
-  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} %2, 2
-  store volatile i64 %3, i64* %outvl
-  %4 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 2)
-  %5 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} %4, 2
-  store volatile i64 %5, i64* %outvl
-  %6 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
-  %7 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} %6, 2
-  store volatile i64 %7, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_nxv4i64(i64* %base, i64 %vl, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_nxv4i64
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E64FF_V_M4 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, i64* %base, i64 %vl)
-  %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  ret void
-}
-
-define void @test_vlseg2ff_mask_nxv4i64(<vscale x 4 x i64> %val, i64* %base, i64 %vl, <vscale x 4 x i1> %mask, i64* %outvl) {
-  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i64
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11, $v0, $x12
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
-  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl
-  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
-  ; CHECK-NEXT:   PseudoRET
-entry:
-  %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 0)
-  %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} %0, 2
-  store volatile i64 %1, i64* %outvl
-  %2 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
-  %3 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} %2, 2
-  store volatile i64 %3, i64* %outvl
-  %4 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 2)
-  %5 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} %4, 2
-  store volatile i64 %5, i64* %outvl
-  %6 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
-  %7 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} %6, 2
-  store volatile i64 %7, i64* %outvl
-  ret void
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
index 962900c424687..70a7036ee8066 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
@@ -82,11 +82,11 @@ body:             |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
-    ; CHECK-NEXT: $v4m4 = PseudoVLE32FF_V_M4 $x16, $noreg, 5 /* e32 */, implicit-def $vl
+    ; CHECK-NEXT: $v4m4, $x0 = PseudoVLE32FF_V_M4 $x16, $noreg, 5 /* e32 */, implicit-def $vl
     ; CHECK-NEXT: $v12m4 = PseudoVMV4R_V $v28m4
     $x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
     $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype
-    $v4m4 = PseudoVLE32FF_V_M4 $x16, $noreg, 5, implicit-def $vl
+    $v4m4, $x0 = PseudoVLE32FF_V_M4 $x16, $noreg, 5, implicit-def $vl
     $v12m4 = COPY $v28m4
 ...
 ---
