[llvm] 4537aae - [RISCV] Make PseudoReadVL have the vtypes of the corresponding VLEFF/VLSEGFF.

Yeting Kuo via llvm-commits llvm-commits at lists.llvm.org
Tue May 10 23:08:05 PDT 2022


Author: Yeting Kuo
Date: 2022-05-11T14:07:58+08:00
New Revision: 4537aae0d57e17c217c192d8977012ba475b130c

URL: https://github.com/llvm/llvm-project/commit/4537aae0d57e17c217c192d8977012ba475b130c
DIFF: https://github.com/llvm/llvm-project/commit/4537aae0d57e17c217c192d8977012ba475b130c.diff

LOG: [RISCV] Make PseudoReadVL have the vtypes of the corresponding VLEFF/VLSEGFF.

This patch makes PseudoReadVL carry the vtype of its corresponding VLEFF/VLSEGFF.
With the vtype encoded as an operand, the vtype at a PseudoReadVL location can be
obtained without tracing back to the corresponding VLEFF/VLSEGFF, which could
simplify optimizations in RISCVInsertVSETVLI such as D123581.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D125199
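
For readers of the MIR checks in the new tests: the PseudoReadVL immediate is the
vtype value produced by RISCVVType::encodeVTYPE, and the "/* e8, m1, ta, mu */"
style comments come from the createMIROperandComment change below. As a rough,
self-contained sketch of that encoding (not the in-tree helper; the encodeVType
name and the program are illustrative only, with the field layout taken from the
RVV 1.0 vtype format), the immediates seen in the tests can be reproduced like
this:

// A minimal standalone sketch of the RVV vtype encoding carried by the new
// PseudoReadVL operand. This is NOT the in-tree RISCVVType::encodeVTYPE; the
// field layout (vlmul[2:0], vsew[5:3], vta bit 6, vma bit 7) follows the
// RVV 1.0 spec, and the printed values match the immediates in the tests.
#include <cassert>
#include <cstdio>

// VLMul encoding for the integer LMULs used here: m1=0, m2=1, m4=2, m8=3.
static unsigned encodeVType(unsigned VLMul, unsigned SEW, bool TailAgnostic,
                            bool MaskAgnostic) {
  assert(SEW >= 8 && (SEW & (SEW - 1)) == 0 && "SEW must be a power of 2 >= 8");
  unsigned Log2SEW = 0;
  for (unsigned S = SEW; S > 1; S >>= 1)
    ++Log2SEW;                                 // e8 -> 3, e16 -> 4, ...
  unsigned VSEW = Log2SEW - 3;                 // vsew field, bits [5:3]
  return (unsigned(MaskAgnostic) << 7) | (unsigned(TailAgnostic) << 6) |
         (VSEW << 3) | VLMul;
}

int main() {
  std::printf("%u\n", encodeVType(0, 8, true, false));   // 64: e8, m1, ta, mu
  std::printf("%u\n", encodeVType(1, 8, true, false));   // 65: e8, m2, ta, mu
  std::printf("%u\n", encodeVType(0, 16, true, false));  // 72: e16, m1, ta, mu
  std::printf("%u\n", encodeVType(0, 8, false, false));  // 0: e8, m1, tu, mu
  std::printf("%u\n", encodeVType(0, 8, false, true));   // 128: e8, m1, tu, ma
  std::printf("%u\n", encodeVType(0, 8, true, true));    // 192: e8, m1, ta, ma
  return 0;
}

For example, unmasked VLEFF pseudos use a tail-agnostic policy (so vleffe8m1
checks PseudoReadVL 64), the TU pseudos are tail-undisturbed (vleffe8m1_tu checks
PseudoReadVL 0), and the masked pseudos take the policy from their last operand.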

Added: 
    llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll
    llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll
    llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll
    llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVMCInstLower.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 7fae031104f3..0115c602e66d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -357,7 +357,8 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
   unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
   MVT VT = Node->getSimpleValueType(0);
   MVT XLenVT = Subtarget->getXLenVT();
-  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+  unsigned SEW = VT.getScalarSizeInBits();
+  unsigned Log2SEW = Log2_32(SEW);
   RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
 
   unsigned CurOp = 2;
@@ -379,8 +380,18 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
                             Log2SEW, static_cast<unsigned>(LMUL));
   MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                                MVT::Other, MVT::Glue, Operands);
+  bool TailAgnostic = true;
+  bool MaskAgnostic = false;
+  if (IsMasked) {
+    uint64_t Policy = Node->getConstantOperandVal(Node->getNumOperands() - 1);
+    TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
+    MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
+  }
+  unsigned VType =
+      RISCVVType::encodeVTYPE(LMUL, SEW, TailAgnostic, MaskAgnostic);
+  SDValue VTypeOp = CurDAG->getTargetConstant(VType, DL, XLenVT);
   SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
-                                          /*Glue*/ SDValue(Load, 2));
+                                          VTypeOp, /*Glue*/ SDValue(Load, 2));
 
   if (auto *MemOp = dyn_cast<MemSDNode>(Node))
     CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});
@@ -1342,7 +1353,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;
 
       MVT VT = Node->getSimpleValueType(0);
-      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
+      unsigned SEW = VT.getScalarSizeInBits();
+      unsigned Log2SEW = Log2_32(SEW);
 
       unsigned CurOp = 2;
       // Masked intrinsic only has TU version pseudo instructions.
@@ -1365,8 +1377,20 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       MachineSDNode *Load =
           CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0),
                                  MVT::Other, MVT::Glue, Operands);
-      SDNode *ReadVL = CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT,
-                                              /*Glue*/ SDValue(Load, 2));
+      bool TailAgnostic = !IsTU;
+      bool MaskAgnostic = false;
+      if (IsMasked) {
+        uint64_t Policy =
+            Node->getConstantOperandVal(Node->getNumOperands() - 1);
+        TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
+        MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
+      }
+      unsigned VType =
+          RISCVVType::encodeVTYPE(LMUL, SEW, TailAgnostic, MaskAgnostic);
+      SDValue VTypeOp = CurDAG->getTargetConstant(VType, DL, XLenVT);
+      SDNode *ReadVL =
+          CurDAG->getMachineNode(RISCV::PseudoReadVL, DL, XLenVT, VTypeOp,
+                                 /*Glue*/ SDValue(Load, 2));
 
       if (auto *MemOp = dyn_cast<MemSDNode>(Node))
         CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 5c35fa964480..f83cefb92d63 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -1352,13 +1352,14 @@ std::string RISCVInstrInfo::createMIROperandComment(
 
   uint64_t TSFlags = MI.getDesc().TSFlags;
 
-  // Print the full VType operand of vsetvli/vsetivli instructions, and the SEW
-  // operand of vector codegen pseudos.
-  if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
-       MI.getOpcode() == RISCV::PseudoVSETVLI ||
-       MI.getOpcode() == RISCV::PseudoVSETIVLI ||
-       MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
-      OpIdx == 2) {
+  // Print the full VType operand of vsetvli/vsetivli and PseudoReadVL
+  // instructions, and the SEW operand of vector codegen pseudos.
+  if (((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
+        MI.getOpcode() == RISCV::PseudoVSETVLI ||
+        MI.getOpcode() == RISCV::PseudoVSETIVLI ||
+        MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
+       OpIdx == 2) ||
+      (MI.getOpcode() == RISCV::PseudoReadVL && OpIdx == 1)) {
     unsigned Imm = MI.getOperand(OpIdx).getImm();
     RISCVVType::printVType(Imm, OS);
   } else if (RISCVII::hasSEWOp(TSFlags)) {

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 82f87cbb07aa..558b284705d3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -4240,7 +4240,7 @@ let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1 in {
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 1,
     Uses = [VL] in
-def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins), []>;
+def PseudoReadVL : Pseudo<(outs GPR:$rd), (ins ixlenimm:$vtype), []>;
 
 let hasSideEffects = 0, mayLoad = 0, mayStore = 1, isCodeGenOnly = 1 in {
   def PseudoVSPILL_M1 : VPseudo<VS1R_V, V_M1, (outs), (ins VR:$rs1, GPR:$rs2)>;

diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
index 4b34bbaea97e..9209744696fb 100644
--- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -210,6 +210,16 @@ bool llvm::lowerRISCVMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
   if (lowerRISCVVMachineInstrToMCInst(MI, OutMI))
     return false;
 
+  // Only the output operand is needed when lowering PseudoReadVL to an MCInst.
+  if (MI->getOpcode() == RISCV::PseudoReadVL) {
+    OutMI.setOpcode(RISCV::CSRRS);
+    OutMI.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
+    OutMI.addOperand(
+        MCOperand::createImm(RISCVSysReg::lookupSysRegByName("VL")->Encoding));
+    OutMI.addOperand(MCOperand::createReg(RISCV::X0));
+    return false;
+  }
+
   OutMI.setOpcode(MI->getOpcode());
 
   for (const MachineOperand &MO : MI->operands()) {
@@ -238,12 +248,6 @@ bool llvm::lowerRISCVMachineInstrToMCInst(const MachineInstr *MI, MCInst &OutMI,
         RISCVSysReg::lookupSysRegByName("VLENB")->Encoding));
     OutMI.addOperand(MCOperand::createReg(RISCV::X0));
     break;
-  case RISCV::PseudoReadVL:
-    OutMI.setOpcode(RISCV::CSRRS);
-    OutMI.addOperand(
-        MCOperand::createImm(RISCVSysReg::lookupSysRegByName("VL")->Encoding));
-    OutMI.addOperand(MCOperand::createReg(RISCV::X0));
-    break;
   }
   return false;
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll
new file mode 100644
index 000000000000..7bb62cd1e978
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32-readvl.ll
@@ -0,0 +1,1891 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \
+; RUN:   -target-abi=ilp32 | FileCheck %s
+declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32);
+declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32);
+declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>*, i32);
+declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>*, i32);
+declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32);
+declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32);
+declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>*, i32);
+declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>*, i32);
+declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i32);
+declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32);
+declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, i32);
+declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>*, i32);
+declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>*, i32);
+declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, i32);
+declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>*, i32);
+declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>*, i32);
+declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i32, i32 immarg)
+declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8>, <vscale x 16 x i8>*, <vscale x 16 x i1>, i32, i32 immarg)
+declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8>, <vscale x 32 x i8>*, <vscale x 32 x i1>, i32, i32 immarg)
+declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8>, <vscale x 64 x i8>*, <vscale x 64 x i1>, i32, i32 immarg)
+declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16>, <vscale x 4 x i16>*, <vscale x 4 x i1>, i32, i32 immarg)
+declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16>, <vscale x 8 x i16>*, <vscale x 8 x i1>, i32, i32 immarg)
+declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16>, <vscale x 16 x i16>*, <vscale x 16 x i1>, i32, i32 immarg)
+declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16>, <vscale x 32 x i16>*, <vscale x 32 x i1>, i32, i32 immarg)
+declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i1>, i32, i32 immarg)
+declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, <vscale x 4 x i1>, i32, i32 immarg)
+declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, <vscale x 8 x i1>, i32, i32 immarg)
+declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>*, <vscale x 16 x i1>, i32, i32 immarg)
+declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64>, <vscale x 1 x i64>*, <vscale x 1 x i1>, i32, i32 immarg)
+declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64>, <vscale x 2 x i64>*, <vscale x 2 x i1>, i32, i32 immarg)
+declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64>, <vscale x 4 x i64>*, <vscale x 4 x i1>, i32, i32 immarg)
+declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64>, <vscale x 8 x i64>*, <vscale x 8 x i1>, i32, i32 immarg)
+
+define i32 @vleffe8m1(<vscale x 8 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m1
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr = PseudoVLE8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m2(<vscale x 16 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m2
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m4(<vscale x 32 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m4
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m8(<vscale x 64 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8> undef, <vscale x 64 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m1_tu(<vscale x 8 x i8> %merge, <vscale x 8 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m1_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE8FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %merge, <vscale x 8 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m2_tu(<vscale x 16 x i8> %merge, <vscale x 16 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m2_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8> %merge, <vscale x 16 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m4_tu(<vscale x 32 x i8> %merge, <vscale x 32 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m4_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8> %merge, <vscale x 32 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m8_tu(<vscale x 64 x i8> %merge, <vscale x 64 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8> %merge, <vscale x 64 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m1_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m1_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m2_tumu(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m2_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m4_tumu(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m4_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m8_tumu(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m8_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m1_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m1_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m2_tamu(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m2_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m4_tamu(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m4_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m8_tamu(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m8_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m1_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m1_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m2_tuma(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m2_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m4_tuma(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m4_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m8_tuma(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m8_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 131 /* e8, m8, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m1_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m1_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m2_tama(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m2_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m4_tama(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m4_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m8_tama(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m8_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 195 /* e8, m8, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m1(<vscale x 4 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m1
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_:%[0-9]+]]:vr = PseudoVLE16FF_V_M1 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m2(<vscale x 8 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m2
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m4(<vscale x 16 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m4
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m8(<vscale x 32 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16> undef, <vscale x 32 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m1_tu(<vscale x 4 x i16> %merge, <vscale x 4 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m1_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE16FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16> %merge, <vscale x 4 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m2_tu(<vscale x 8 x i16> %merge, <vscale x 8 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m2_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16> %merge, <vscale x 8 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m4_tu(<vscale x 16 x i16> %merge, <vscale x 16 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m4_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16> %merge, <vscale x 16 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m8_tu(<vscale x 32 x i16> %merge, <vscale x 32 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16> %merge, <vscale x 32 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m1_tumu(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m1_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m2_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m2_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m4_tumu(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m4_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m8_tumu(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m8_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m1_tamu(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m1_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m2_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m2_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m4_tamu(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m4_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m8_tamu(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m8_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m1_tuma(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m1_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m2_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m2_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m4_tuma(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m4_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m8_tuma(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m8_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 139 /* e16, m8, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m1_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m1_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m2_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m2_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m4_tama(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m4_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m8_tama(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m8_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 203 /* e16, m8, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m1(<vscale x 2 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m1
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_:%[0-9]+]]:vr = PseudoVLE32FF_V_M1 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m2(<vscale x 4 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m2
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m4(<vscale x 8 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m4
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m8(<vscale x 16 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m1_tu(<vscale x 2 x i32> %merge, <vscale x 2 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m1_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE32FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> %merge, <vscale x 2 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m2_tu(<vscale x 4 x i32> %merge, <vscale x 4 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m2_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32> %merge, <vscale x 4 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m4_tu(<vscale x 8 x i32> %merge, <vscale x 8 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m4_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32> %merge, <vscale x 8 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m8_tu(<vscale x 16 x i32> %merge, <vscale x 16 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32> %merge, <vscale x 16 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m1_tumu(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m1_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m2_tumu(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m2_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m4_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m4_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m8_tumu(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m8_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m1_tamu(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m1_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m2_tamu(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m2_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m4_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m4_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m8_tamu(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m8_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m1_tuma(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m1_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m2_tuma(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m2_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m4_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m4_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 146 /* e32, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m8_tuma(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m8_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 147 /* e32, m8, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m1_tama(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m1_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m2_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m2_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m4_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m4_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 210 /* e32, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m8_tama(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m8_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 211 /* e32, m8, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m1(<vscale x 1 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m1
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_:%[0-9]+]]:vr = PseudoVLE64FF_V_M1 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m2(<vscale x 2 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m2
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m4(<vscale x 4 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m4
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m8(<vscale x 8 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m1_tu(<vscale x 1 x i64> %merge, <vscale x 1 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m1_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE64FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64> %merge, <vscale x 1 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m2_tu(<vscale x 2 x i64> %merge, <vscale x 2 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m2_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64> %merge, <vscale x 2 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m4_tu(<vscale x 4 x i64> %merge, <vscale x 4 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m4_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64> %merge, <vscale x 4 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m8_tu(<vscale x 8 x i64> %merge, <vscale x 8 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64> %merge, <vscale x 8 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m1_tumu(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m1_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m2_tumu(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m2_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m4_tumu(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m4_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m8_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m8_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m1_tamu(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m1_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m2_tamu(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m2_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m4_tamu(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m4_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m8_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m8_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m1_tuma(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m1_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m2_tuma(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m2_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m4_tuma(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m4_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m8_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m8_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 155 /* e64, m8, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m1_tama(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m1_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m2_tama(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m2_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m4_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m4_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m8_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m8_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 219 /* e64, m8, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
+  ret i32 %1
+}

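(Aside on the immediates checked above and below: the PseudoReadVL operand in these tests is a plain vtype immediate, annotated by update_mir_test_checks.py, e.g. 216 /* e64, m1, ta, ma */. As a quick cross-check, here is a minimal standalone C++ sketch of the RVV vtype bit layout — vlmul in bits [2:0], vsew in bits [5:3], vta in bit 6, vma in bit 7. The helper names below are illustrative only and are not the in-tree LLVM API; they simply reproduce the numbers seen in the CHECK lines.)

// Standalone sketch (not LLVM code): reproduce the vtype immediates that the
// autogenerated CHECK lines annotate, e.g. "216 /* e64, m1, ta, ma */".
// Field layout follows the RVV spec: vlmul in bits [2:0], vsew in bits [5:3],
// vta in bit 6, vma in bit 7.
#include <cstdio>

// vsew encoding: SEW = 8 * 2^vsew, so e8=0, e16=1, e32=2, e64=3.
static unsigned encodeSEW(unsigned SEW) {
  unsigned VSEW = 0;
  while ((8u << VSEW) != SEW)
    ++VSEW;
  return VSEW;
}

// vlmul encoding for the integer LMULs used in these tests: m1=0, m2=1, m4=2, m8=3.
static unsigned encodeLMUL(unsigned LMUL) {
  unsigned VLMUL = 0;
  while ((1u << VLMUL) != LMUL)
    ++VLMUL;
  return VLMUL;
}

static unsigned encodeVType(unsigned SEW, unsigned LMUL, bool TailAgnostic,
                            bool MaskAgnostic) {
  return encodeLMUL(LMUL) | (encodeSEW(SEW) << 3) | (TailAgnostic << 6) |
         (MaskAgnostic << 7);
}

int main() {
  // Matches "216 /* e64, m1, ta, ma */" from the masked e64 m1 tama test.
  printf("%u\n", encodeVType(64, 1, true, true));
  // Matches "64 /* e8, m1, ta, mu */" from the unmasked e8 m1 test.
  printf("%u\n", encodeVType(8, 1, true, false));
  // Matches "0 /* e8, m1, tu, mu */" from the e8 m1 tail-undisturbed test.
  printf("%u\n", encodeVType(8, 1, false, false));
  return 0;
}

(Running this prints 216, 64 and 0, matching the annotations in the surrounding tests; the other immediates, e.g. 217 for e64/m2 or 200 for e16/m1/ta/ma, fall out of the same layout.)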
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll
new file mode 100644
index 000000000000..7bb62cd1e978
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64-readvl.ll
@@ -0,0 +1,1891 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \
+; RUN:   -target-abi=ilp32 | FileCheck %s
+declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, i32);
+declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>*, i32);
+declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>*, i32);
+declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>*, i32);
+declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, i32);
+declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, i32);
+declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>*, i32);
+declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>*, i32);
+declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, i32);
+declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, i32);
+declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, i32);
+declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>*, i32);
+declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>*, i32);
+declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, i32);
+declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>*, i32);
+declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>*, i32);
+declare { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i32, i32 immarg)
+declare { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8>, <vscale x 16 x i8>*, <vscale x 16 x i1>, i32, i32 immarg)
+declare { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8>, <vscale x 32 x i8>*, <vscale x 32 x i1>, i32, i32 immarg)
+declare { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8>, <vscale x 64 x i8>*, <vscale x 64 x i1>, i32, i32 immarg)
+declare { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16>, <vscale x 4 x i16>*, <vscale x 4 x i1>, i32, i32 immarg)
+declare { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16>, <vscale x 8 x i16>*, <vscale x 8 x i1>, i32, i32 immarg)
+declare { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16>, <vscale x 16 x i16>*, <vscale x 16 x i1>, i32, i32 immarg)
+declare { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16>, <vscale x 32 x i16>*, <vscale x 32 x i1>, i32, i32 immarg)
+declare { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i1>, i32, i32 immarg)
+declare { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, <vscale x 4 x i1>, i32, i32 immarg)
+declare { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, <vscale x 8 x i1>, i32, i32 immarg)
+declare { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32>, <vscale x 16 x i32>*, <vscale x 16 x i1>, i32, i32 immarg)
+declare { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64>, <vscale x 1 x i64>*, <vscale x 1 x i1>, i32, i32 immarg)
+declare { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64>, <vscale x 2 x i64>*, <vscale x 2 x i1>, i32, i32 immarg)
+declare { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64>, <vscale x 4 x i64>*, <vscale x 4 x i1>, i32, i32 immarg)
+declare { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64>, <vscale x 8 x i64>*, <vscale x 8 x i1>, i32, i32 immarg)
+
+define i32 @vleffe8m1(<vscale x 8 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m1
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr = PseudoVLE8FF_V_M1 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m2(<vscale x 16 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m2
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m4(<vscale x 32 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m4
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m8(<vscale x 64 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8 [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8> undef, <vscale x 64 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m1_tu(<vscale x 8 x i8> %merge, <vscale x 8 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m1_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE8FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %merge, <vscale x 8 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m2_tu(<vscale x 16 x i8> %merge, <vscale x 16 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m2_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE8FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.nxv16i8(<vscale x 16 x i8> %merge, <vscale x 16 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m4_tu(<vscale x 32 x i8> %merge, <vscale x 32 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m4_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE8FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.nxv32i8(<vscale x 32 x i8> %merge, <vscale x 32 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m8_tu(<vscale x 64 x i8> %merge, <vscale x 64 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE8FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.nxv64i8(<vscale x 64 x i8> %merge, <vscale x 64 x i8>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m1_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m1_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m2_tumu(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m2_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m4_tumu(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m4_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m8_tumu(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m8_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 3 /* e8, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m1_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m1_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m2_tamu(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m2_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m4_tamu(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m4_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m8_tamu(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m8_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 67 /* e8, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m1_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m1_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m2_tuma(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m2_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m4_tuma(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m4_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m8_tuma(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m8_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 131 /* e8, m8, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m1_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i8> %maskedoff, <vscale x 8 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m1_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE8FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i8>, i32 } @llvm.riscv.vleff.mask.nxv8i8.i32(<vscale x 8 x i8> %maskedoff, <vscale x 8 x i8>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 8 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m2_tama(<vscale x 16 x i1> %mask, <vscale x 16 x i8> %maskedoff, <vscale x 16 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m2_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE8FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i8>, i32 } @llvm.riscv.vleff.mask.nxv16i8.i32(<vscale x 16 x i8> %maskedoff, <vscale x 16 x i8>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 16 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m4_tama(<vscale x 32 x i1> %mask, <vscale x 32 x i8> %maskedoff, <vscale x 32 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m4_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE8FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i8>, i32 } @llvm.riscv.vleff.mask.nxv32i8.i32(<vscale x 32 x i8> %maskedoff, <vscale x 32 x i8>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 32 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe8m8_tama(<vscale x 64 x i1> %mask, <vscale x 64 x i8> %maskedoff, <vscale x 64 x i8> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe8m8_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE8FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 195 /* e8, m8, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 64 x i8>, i32 } @llvm.riscv.vleff.mask.nxv64i8.i32(<vscale x 64 x i8> %maskedoff, <vscale x 64 x i8>* %p, <vscale x 64 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 64 x i8>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m1(<vscale x 4 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m1
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_:%[0-9]+]]:vr = PseudoVLE16FF_V_M1 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m2(<vscale x 8 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m2
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m4(<vscale x 16 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m4
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m8(<vscale x 32 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8 [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16> undef, <vscale x 32 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m1_tu(<vscale x 4 x i16> %merge, <vscale x 4 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m1_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE16FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.nxv4i16(<vscale x 4 x i16> %merge, <vscale x 4 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m2_tu(<vscale x 8 x i16> %merge, <vscale x 8 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m2_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE16FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.nxv8i16(<vscale x 8 x i16> %merge, <vscale x 8 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m4_tu(<vscale x 16 x i16> %merge, <vscale x 16 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m4_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE16FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.nxv16i16(<vscale x 16 x i16> %merge, <vscale x 16 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m8_tu(<vscale x 32 x i16> %merge, <vscale x 32 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE16FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.nxv32i16(<vscale x 32 x i16> %merge, <vscale x 32 x i16>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m1_tumu(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m1_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m2_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m2_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m4_tumu(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m4_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m8_tumu(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m8_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 11 /* e16, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m1_tamu(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m1_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m2_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m2_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m4_tamu(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m4_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m8_tamu(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m8_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 75 /* e16, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m1_tuma(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m1_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m2_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m2_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m4_tuma(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m4_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m8_tuma(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m8_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 139 /* e16, m8, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m1_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i16> %maskedoff, <vscale x 4 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m1_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE16FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i16>, i32 } @llvm.riscv.vleff.mask.nxv4i16.i32(<vscale x 4 x i16> %maskedoff, <vscale x 4 x i16>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 4 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m2_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i16> %maskedoff, <vscale x 8 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m2_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE16FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i16>, i32 } @llvm.riscv.vleff.mask.nxv8i16.i32(<vscale x 8 x i16> %maskedoff, <vscale x 8 x i16>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 8 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m4_tama(<vscale x 16 x i1> %mask, <vscale x 16 x i16> %maskedoff, <vscale x 16 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m4_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE16FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i16>, i32 } @llvm.riscv.vleff.mask.nxv16i16.i32(<vscale x 16 x i16> %maskedoff, <vscale x 16 x i16>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 16 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe16m8_tama(<vscale x 32 x i1> %mask, <vscale x 32 x i16> %maskedoff, <vscale x 32 x i16> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe16m8_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE16FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE16FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 203 /* e16, m8, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 32 x i16>, i32 } @llvm.riscv.vleff.mask.nxv32i16.i32(<vscale x 32 x i16> %maskedoff, <vscale x 32 x i16>* %p, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 32 x i16>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m1(<vscale x 2 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m1
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_:%[0-9]+]]:vr = PseudoVLE32FF_V_M1 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m2(<vscale x 4 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m2
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m4(<vscale x 8 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m4
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m8(<vscale x 16 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8 [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m1_tu(<vscale x 2 x i32> %merge, <vscale x 2 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m1_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE32FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.nxv2i32(<vscale x 2 x i32> %merge, <vscale x 2 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m2_tu(<vscale x 4 x i32> %merge, <vscale x 4 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m2_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE32FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.nxv4i32(<vscale x 4 x i32> %merge, <vscale x 4 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m4_tu(<vscale x 8 x i32> %merge, <vscale x 8 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m4_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE32FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.nxv8i32(<vscale x 8 x i32> %merge, <vscale x 8 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m8_tu(<vscale x 16 x i32> %merge, <vscale x 16 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE32FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.nxv16i32(<vscale x 16 x i32> %merge, <vscale x 16 x i32>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m1_tumu(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m1_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m2_tumu(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m2_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m4_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m4_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 18 /* e32, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m8_tumu(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m8_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 19 /* e32, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m1_tamu(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m1_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m2_tamu(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m2_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m4_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m4_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 82 /* e32, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m8_tamu(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m8_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 83 /* e32, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m1_tuma(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m1_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m2_tuma(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m2_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m4_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m4_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 146 /* e32, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m8_tuma(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m8_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 147 /* e32, m8, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m1_tama(<vscale x 2 x i1> %mask, <vscale x 2 x i32> %maskedoff, <vscale x 2 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m1_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i32>, i32 } @llvm.riscv.vleff.mask.nxv2i32.i32(<vscale x 2 x i32> %maskedoff, <vscale x 2 x i32>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 2 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m2_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i32> %maskedoff, <vscale x 4 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m2_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE32FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i32>, i32 } @llvm.riscv.vleff.mask.nxv4i32.i32(<vscale x 4 x i32> %maskedoff, <vscale x 4 x i32>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 4 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m4_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i32> %maskedoff, <vscale x 8 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m4_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE32FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 210 /* e32, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i32>, i32 } @llvm.riscv.vleff.mask.nxv8i32.i32(<vscale x 8 x i32> %maskedoff, <vscale x 8 x i32>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 8 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe32m8_tama(<vscale x 16 x i1> %mask, <vscale x 16 x i32> %maskedoff, <vscale x 16 x i32> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe32m8_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE32FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE32FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 5 /* e32 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 211 /* e32, m8, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 16 x i32>, i32 } @llvm.riscv.vleff.mask.nxv16i32.i32(<vscale x 16 x i32> %maskedoff, <vscale x 16 x i32>* %p, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 16 x i32>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m1(<vscale x 1 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m1
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_:%[0-9]+]]:vr = PseudoVLE64FF_V_M1 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m2(<vscale x 2 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m2
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m4(<vscale x 4 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m4
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m8(<vscale x 8 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8 [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m1_tu(<vscale x 1 x i64> %merge, <vscale x 1 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m1_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_TU:%[0-9]+]]:vr = PseudoVLE64FF_V_M1_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.nxv1i64(<vscale x 1 x i64> %merge, <vscale x 1 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m2_tu(<vscale x 2 x i64> %merge, <vscale x 2 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m2_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_TU:%[0-9]+]]:vrm2 = PseudoVLE64FF_V_M2_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.nxv2i64(<vscale x 2 x i64> %merge, <vscale x 2 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m4_tu(<vscale x 4 x i64> %merge, <vscale x 4 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m4_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_TU:%[0-9]+]]:vrm4 = PseudoVLE64FF_V_M4_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.nxv4i64(<vscale x 4 x i64> %merge, <vscale x 4 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m8_tu(<vscale x 8 x i64> %merge, <vscale x 8 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m8_tu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_TU:%[0-9]+]]:vrm8 = PseudoVLE64FF_V_M8_TU [[COPY2]], [[COPY1]], [[COPY]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.nxv8i64(<vscale x 8 x i64> %merge, <vscale x 8 x i64>* %p, i32 %vl)
+  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m1_tumu(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m1_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m2_tumu(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m2_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m4_tumu(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m4_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m8_tumu(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m8_tumu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 27 /* e64, m8, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m1_tamu(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m1_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m2_tamu(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m2_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m4_tamu(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m4_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m8_tamu(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m8_tamu
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 91 /* e64, m8, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
+  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m1_tuma(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m1_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m2_tuma(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m2_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m4_tuma(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m4_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m8_tuma(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m8_tuma
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 155 /* e64, m8, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
+  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m1_tama(<vscale x 1 x i1> %mask, <vscale x 1 x i64> %maskedoff, <vscale x 1 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m1_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrnov0 = COPY $v8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M1_MASK:%[0-9]+]]:vrnov0 = PseudoVLE64FF_V_M1_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 1 x i64>, i32 } @llvm.riscv.vleff.mask.nxv1i64.i32(<vscale x 1 x i64> %maskedoff, <vscale x 1 x i64>* %p, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 1 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m2_tama(<vscale x 2 x i1> %mask, <vscale x 2 x i64> %maskedoff, <vscale x 2 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m2_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m2, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm2nov0 = COPY $v8m2
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M2_MASK:%[0-9]+]]:vrm2nov0 = PseudoVLE64FF_V_M2_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 2 x i64>, i32 } @llvm.riscv.vleff.mask.nxv2i64.i32(<vscale x 2 x i64> %maskedoff, <vscale x 2 x i64>* %p, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 2 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m4_tama(<vscale x 4 x i1> %mask, <vscale x 4 x i64> %maskedoff, <vscale x 4 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m4_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m4, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm4nov0 = COPY $v8m4
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M4_MASK:%[0-9]+]]:vrm4nov0 = PseudoVLE64FF_V_M4_MASK [[COPY2]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 4 x i64>, i32 } @llvm.riscv.vleff.mask.nxv4i64.i32(<vscale x 4 x i64> %maskedoff, <vscale x 4 x i64>* %p, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 4 x i64>, i32 } %0, 1
+  ret i32 %1
+}
+
+define i32 @vleffe64m8_tama(<vscale x 8 x i1> %mask, <vscale x 8 x i64> %maskedoff, <vscale x 8 x i64> *%p, i32 %vl) {
+  ; CHECK-LABEL: name: vleffe64m8_tama
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v0, $v8m8, $x10, $x11
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrm8 = COPY $v8m8
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   $v0 = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm8nov0 = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[PseudoVLE64FF_V_M8_MASK:%[0-9]+]]:vrm8nov0 = PseudoVLE64FF_V_M8_MASK [[COPY4]], [[COPY1]], $v0, [[COPY]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 219 /* e64, m8, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   $x10 = COPY [[PseudoReadVL]]
+  ; CHECK-NEXT:   PseudoRET implicit $x10
+entry:
+  %0 = tail call { <vscale x 8 x i64>, i32 } @llvm.riscv.vleff.mask.nxv8i64.i32(<vscale x 8 x i64> %maskedoff, <vscale x 8 x i64>* %p, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
+  %1 = extractvalue { <vscale x 8 x i64>, i32 } %0, 1
+  ret i32 %1
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll
new file mode 100644
index 000000000000..37ef7aad9689
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv32-readvl.ll
@@ -0,0 +1,732 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -stop-after=finalize-isel < %s \
+; RUN:   -target-abi=ilp32d | FileCheck %s
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i32)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i32, i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(i32* , i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i32, i32)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.nxv16i8(i8* , i32)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i32, i32)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.nxv1i64(i64* , i32)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i32, i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.nxv1i32(i32* , i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32, i32)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.nxv8i16(i16* , i32)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.nxv4i8(i8* , i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.nxv1i16(i16* , i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32, i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.nxv2i32(i32* , i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.nxv8i8(i8* , i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32, i32)
+declare {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.nxv4i64(i64* , i32)
+declare {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i1>, i32, i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.nxv4i16(i16* , i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.nxv1i8(i8* , i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.nxv2i8(i8* , i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32, i32)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.nxv8i32(i32* , i32)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i1>, i32, i32)
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.nxv32i8(i8* , i32)
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i1>, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.nxv2i16(i16* , i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32, i32)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.nxv2i64(i64* , i32)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i1>, i32, i32)
+
+define void @test_vlseg2ff_nxv8i8(i8* %base, i32 %vl, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl, <vscale x 8 x i1> %mask, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %2, 2
+  store volatile i32 %3, i32* %outvl
+  %4 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
+  %5 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %4, 2
+  store volatile i32 %5, i32* %outvl
+  %6 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
+  %7 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %6, 2
+  store volatile i32 %7, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv16i8(i8* %base, i32 %vl, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv16i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E8FF_V_M2 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %vl, <vscale x 16 x i1> %mask, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %2, 2
+  store volatile i32 %3, i32* %outvl
+  %4 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
+  %5 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %4, 2
+  store volatile i32 %5, i32* %outvl
+  %6 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
+  %7 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %6, 2
+  store volatile i32 %7, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv32i8(i8* %base, i32 %vl, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv32i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E8FF_V_M4 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i32 %vl, <vscale x 32 x i1> %mask, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv32i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
+  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %2, 2
+  store volatile i32 %3, i32* %outvl
+  %4 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i32 %vl, i32 2)
+  %5 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %4, 2
+  store volatile i32 %5, i32* %outvl
+  %6 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i32 %vl, i32 3)
+  %7 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %6, 2
+  store volatile i32 %7, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv4i16(i16* %base, i32 %vl, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv4i16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E16FF_V_M1 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl, <vscale x 4 x i1> %mask, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %2, 2
+  store volatile i32 %3, i32* %outvl
+  %4 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
+  %5 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %4, 2
+  store volatile i32 %5, i32* %outvl
+  %6 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
+  %7 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %6, 2
+  store volatile i32 %7, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv8i16(i16* %base, i32 %vl, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E16FF_V_M2 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %vl, <vscale x 8 x i1> %mask, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %2, 2
+  store volatile i32 %3, i32* %outvl
+  %4 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 2)
+  %5 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %4, 2
+  store volatile i32 %5, i32* %outvl
+  %6 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 3)
+  %7 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %6, 2
+  store volatile i32 %7, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv16i16(i16* %base, i32 %vl, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv16i16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E16FF_V_M4 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i32 %vl, <vscale x 16 x i1> %mask, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %2, 2
+  store volatile i32 %3, i32* %outvl
+  %4 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 2)
+  %5 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %4, 2
+  store volatile i32 %5, i32* %outvl
+  %6 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 3)
+  %7 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %6, 2
+  store volatile i32 %7, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv2i32(i32* %base, i32 %vl, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv2i32
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E32FF_V_M1 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl, <vscale x 2 x i1> %mask, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i32
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %2, 2
+  store volatile i32 %3, i32* %outvl
+  %4 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 2)
+  %5 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %4, 2
+  store volatile i32 %5, i32* %outvl
+  %6 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
+  %7 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %6, 2
+  store volatile i32 %7, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv4i32(i32* %base, i32 %vl, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv4i32
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E32FF_V_M2 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(i32* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %vl, <vscale x 4 x i1> %mask, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i32
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %2, 2
+  store volatile i32 %3, i32* %outvl
+  %4 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
+  %5 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %4, 2
+  store volatile i32 %5, i32* %outvl
+  %6 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
+  %7 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %6, 2
+  store volatile i32 %7, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv1i64(i64* %base, i32 %vl, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv1i64
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E64FF_V_M1 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.nxv1i64(i64* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i32 %vl, <vscale x 1 x i1> %mask, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv1i64
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} %2, 2
+  store volatile i32 %3, i32* %outvl
+  %4 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 2)
+  %5 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} %4, 2
+  store volatile i32 %5, i32* %outvl
+  %6 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 3)
+  %7 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i32} %6, 2
+  store volatile i32 %7, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv2i64(i64* %base, i32 %vl, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv2i64
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E64FF_V_M2 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.nxv2i64(i64* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i32 %vl, <vscale x 2 x i1> %mask, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i64
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
+  %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} %2, 2
+  store volatile i32 %3, i32* %outvl
+  %4 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 2)
+  %5 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} %4, 2
+  store volatile i32 %5, i32* %outvl
+  %6 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 3)
+  %7 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i32} %6, 2
+  store volatile i32 %7, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv4i64(i64* %base, i32 %vl, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv4i64
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E64FF_V_M4 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.nxv4i64(i64* %base, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv4i64(<vscale x 4 x i64> %val, i64* %base, i32 %vl, <vscale x 4 x i1> %mask, i32* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i64
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SW killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s32) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 0)
+  %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} %0, 2
+  store volatile i32 %1, i32* %outvl
+  %2 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
+  %3 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} %2, 2
+  store volatile i32 %3, i32* %outvl
+  %4 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 2)
+  %5 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} %4, 2
+  store volatile i32 %5, i32* %outvl
+  %6 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 3)
+  %7 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i32} %6, 2
+  store volatile i32 %7, i32* %outvl
+  ret void
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll
new file mode 100644
index 000000000000..228ff9a7fcf2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlseg2ff-rv64-readvl.ll
@@ -0,0 +1,732 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel < %s \
+; RUN:   -target-abi=lp64d | FileCheck %s
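+; For reference when reading the PseudoReadVL checks below: the immediate
+; operand is the vtype value produced by RISCVVType::encodeVTYPE, with LMUL
+; in bits [2:0], SEW in bits [5:3] (encoded as log2(SEW) - 3), the
+; tail-agnostic bit at bit 6 and the mask-agnostic bit at bit 7. For example,
+; 0 decodes to e8/m1/tu/mu, 64 to e8/m1/ta/mu, 128 to e8/m1/tu/ma and
+; 192 to e8/m1/ta/ma.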
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* , i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i64, i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.nxv4i32(i32* , i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i64, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.nxv16i8(i8* , i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i64, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.nxv1i64(i64* , i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg2ff.nxv1i32(i32* , i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.nxv8i16(i16* , i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i64, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg2ff.nxv4i8(i8* , i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg2ff.nxv1i16(i16* , i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.nxv2i32(i32* , i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(i8* , i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64, i64)
+declare {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.nxv4i64(i64* , i64)
+declare {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i1>, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.nxv4i16(i16* , i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg2ff.nxv1i8(i8* , i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg2ff.nxv2i8(i8* , i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i64, i64)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>, i64} @llvm.riscv.vlseg2ff.nxv8i32(i32* , i64)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i1>, i64, i64)
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.nxv32i8(i8* , i64)
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i1>, i64, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg2ff.nxv2i16(i16* , i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i64, i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.nxv2i64(i64* , i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i1>, i64, i64)
+
+define void @test_vlseg2ff_nxv8i8(i8* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %vl, <vscale x 8 x i1> %mask, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 0 /* e8, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 64 /* e8, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 128 /* e8, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E8FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 192 /* e8, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %2, 2
+  store volatile i64 %3, i64* %outvl
+  %4 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 2)
+  %5 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %4, 2
+  store volatile i64 %5, i64* %outvl
+  %6 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
+  %7 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} %6, 2
+  store volatile i64 %7, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv16i8(i8* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv16i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E8FF_V_M2 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %vl, <vscale x 16 x i1> %mask, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 1 /* e8, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 65 /* e8, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 129 /* e8, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E8FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 193 /* e8, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} %2, 2
+  store volatile i64 %3, i64* %outvl
+  %4 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 2)
+  %5 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} %4, 2
+  store volatile i64 %5, i64* %outvl
+  %6 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 3)
+  %7 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} %6, 2
+  store volatile i64 %7, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv32i8(i8* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv32i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E8FF_V_M4 [[COPY2]], [[COPY1]], 3 /* e8 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i64 %vl, <vscale x 32 x i1> %mask, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv32i8
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 2 /* e8, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 66 /* e8, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 130 /* e8, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E8FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 3 /* e8 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 194 /* e8, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
+  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} %2, 2
+  store volatile i64 %3, i64* %outvl
+  %4 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i64 %vl, i64 2)
+  %5 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} %4, 2
+  store volatile i64 %5, i64* %outvl
+  %6 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i64 %vl, i64 3)
+  %7 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} %6, 2
+  store volatile i64 %7, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv4i16(i16* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv4i16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E16FF_V_M1 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %vl, <vscale x 4 x i1> %mask, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 8 /* e16, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 72 /* e16, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 136 /* e16, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E16FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 200 /* e16, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} %2, 2
+  store volatile i64 %3, i64* %outvl
+  %4 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 2)
+  %5 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} %4, 2
+  store volatile i64 %5, i64* %outvl
+  %6 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
+  %7 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} %6, 2
+  store volatile i64 %7, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv8i16(i16* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E16FF_V_M2 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %vl, <vscale x 8 x i1> %mask, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv8i16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 9 /* e16, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 73 /* e16, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 137 /* e16, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E16FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 201 /* e16, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} %2, 2
+  store volatile i64 %3, i64* %outvl
+  %4 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 2)
+  %5 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} %4, 2
+  store volatile i64 %5, i64* %outvl
+  %6 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 3)
+  %7 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} %6, 2
+  store volatile i64 %7, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv16i16(i16* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv16i16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E16FF_V_M4 [[COPY2]], [[COPY1]], 4 /* e16 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %vl, <vscale x 16 x i1> %mask, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv16i16
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 10 /* e16, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 74 /* e16, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 138 /* e16, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E16FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E16FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 4 /* e16 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 202 /* e16, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %2, 2
+  store volatile i64 %3, i64* %outvl
+  %4 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 2)
+  %5 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %4, 2
+  store volatile i64 %5, i64* %outvl
+  %6 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 3)
+  %7 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} %6, 2
+  store volatile i64 %7, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv2i32(i32* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv2i32
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E32FF_V_M1 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl, <vscale x 2 x i1> %mask, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i32
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 16 /* e32, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 80 /* e32, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 144 /* e32, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E32FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 208 /* e32, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} %2, 2
+  store volatile i64 %3, i64* %outvl
+  %4 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 2)
+  %5 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} %4, 2
+  store volatile i64 %5, i64* %outvl
+  %6 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
+  %7 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} %6, 2
+  store volatile i64 %7, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv4i32(i32* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv4i32
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E32FF_V_M2 [[COPY2]], [[COPY1]], 5 /* e32 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.nxv4i32(i32* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %vl, <vscale x 4 x i1> %mask, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i32
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 17 /* e32, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 81 /* e32, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 145 /* e32, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E32FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E32FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 5 /* e32 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 209 /* e32, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} %2, 2
+  store volatile i64 %3, i64* %outvl
+  %4 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 2)
+  %5 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} %4, 2
+  store volatile i64 %5, i64* %outvl
+  %6 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
+  %7 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} %6, 2
+  store volatile i64 %7, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv1i64(i64* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv1i64
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_:%[0-9]+]]:vrn2m1 = PseudoVLSEG2E64FF_V_M1 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.nxv1i64(i64* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl, <vscale x 1 x i1> %mask, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv1i64
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vr = COPY $v8
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m1nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm1_0, [[COPY4]], %subreg.sub_vrm1_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 24 /* e64, m1, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK1:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 88 /* e64, m1, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK2:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 152 /* e64, m1, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M1_MASK3:%[0-9]+]]:vrn2m1nov0 = PseudoVLSEG2E64FF_V_M1_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 216 /* e64, m1, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} %2, 2
+  store volatile i64 %3, i64* %outvl
+  %4 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 2)
+  %5 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} %4, 2
+  store volatile i64 %5, i64* %outvl
+  %6 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 3)
+  %7 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} %6, 2
+  store volatile i64 %7, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv2i64(i64* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv2i64
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_:%[0-9]+]]:vrn2m2 = PseudoVLSEG2E64FF_V_M2 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.nxv2i64(i64* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %vl, <vscale x 2 x i1> %mask, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv2i64
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m2, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm2 = COPY $v8m2
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m2nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm2_0, [[COPY4]], %subreg.sub_vrm2_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 25 /* e64, m2, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK1:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 89 /* e64, m2, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK2:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 153 /* e64, m2, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M2_MASK3:%[0-9]+]]:vrn2m2nov0 = PseudoVLSEG2E64FF_V_M2_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 217 /* e64, m2, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
+  %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} %2, 2
+  store volatile i64 %3, i64* %outvl
+  %4 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 2)
+  %5 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} %4, 2
+  store volatile i64 %5, i64* %outvl
+  %6 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 3)
+  %7 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} %6, 2
+  store volatile i64 %7, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_nxv4i64(i64* %base, i64 %vl, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_nxv4i64
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $x10, $x11, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_:%[0-9]+]]:vrn2m4 = PseudoVLSEG2E64FF_V_M4 [[COPY2]], [[COPY1]], 6 /* e64 */, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.nxv4i64(i64* %base, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  ret void
+}
+
+define void @test_vlseg2ff_mask_nxv4i64(<vscale x 4 x i64> %val, i64* %base, i64 %vl, <vscale x 4 x i1> %mask, i64* %outvl) {
+  ; CHECK-LABEL: name: test_vlseg2ff_mask_nxv4i64
+  ; CHECK: bb.0.entry:
+  ; CHECK-NEXT:   liveins: $v8m4, $x10, $x11, $v0, $x12
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gpr = COPY $x12
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gprnox0 = COPY $x11
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gpr = COPY $x10
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vrm4 = COPY $v8m4
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vrn2m4nov0 = REG_SEQUENCE [[COPY4]], %subreg.sub_vrm4_0, [[COPY4]], %subreg.sub_vrm4_1
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 0, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL:%[0-9]+]]:gpr = PseudoReadVL 26 /* e64, m4, tu, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK1:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 1, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL1:%[0-9]+]]:gpr = PseudoReadVL 90 /* e64, m4, ta, mu */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL1]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK2:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 2, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL2:%[0-9]+]]:gpr = PseudoReadVL 154 /* e64, m4, tu, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL2]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[PseudoVLSEG2E64FF_V_M4_MASK3:%[0-9]+]]:vrn2m4nov0 = PseudoVLSEG2E64FF_V_M4_MASK [[REG_SEQUENCE]], [[COPY3]], $v0, [[COPY2]], 6 /* e64 */, 3, implicit-def $vl
+  ; CHECK-NEXT:   [[PseudoReadVL3:%[0-9]+]]:gpr = PseudoReadVL 218 /* e64, m4, ta, ma */, implicit $vl
+  ; CHECK-NEXT:   SD killed [[PseudoReadVL3]], [[COPY]], 0 :: (volatile store (s64) into %ir.outvl)
+  ; CHECK-NEXT:   PseudoRET
+entry:
+  %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 0)
+  %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} %0, 2
+  store volatile i64 %1, i64* %outvl
+  %2 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
+  %3 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} %2, 2
+  store volatile i64 %3, i64* %outvl
+  %4 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 2)
+  %5 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} %4, 2
+  store volatile i64 %5, i64* %outvl
+  %6 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 3)
+  %7 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} %6, 2
+  store volatile i64 %7, i64* %outvl
+  ret void
+}
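
For readers decoding the vtype immediates carried by PseudoReadVL in the CHECK lines above (e.g. 64 /* e8, m1, ta, mu */, 128 /* e8, m1, tu, ma */, 201 /* e16, m2, ta, ma */), here is a minimal standalone C++ sketch of the standard RVV vtype bit layout: vlmul in bits [2:0], vsew in bits [5:3], vta in bit 6, vma in bit 7. It is illustrative only, assuming the RVV 1.0 encoding and the non-fractional LMULs used in these tests; it is not the RISCVVType::encodeVTYPE implementation itself, though it should reproduce the same immediates.

// Illustrative sketch, not LLVM code. Assumes vlmul: m1=0, m2=1, m4=2, m8=3
// and vsew: e8=0, e16=1, e32=2, e64=3, per the RVV 1.0 vtype layout.
#include <cassert>
#include <cstdio>

static unsigned encodeVType(unsigned LMul, unsigned SEW, bool TailAgnostic,
                            bool MaskAgnostic) {
  unsigned VSEW = 0;
  for (unsigned S = SEW; S > 8; S >>= 1) // e8 -> 0, e16 -> 1, e32 -> 2, e64 -> 3
    ++VSEW;
  return LMul | (VSEW << 3) | (TailAgnostic << 6) | (MaskAgnostic << 7);
}

int main() {
  // These values match a few of the PseudoReadVL operands above.
  assert(encodeVType(0, 8, true, false) == 64);   // e8,  m1, ta, mu
  assert(encodeVType(1, 16, true, true) == 201);  // e16, m2, ta, ma
  assert(encodeVType(2, 64, false, false) == 26); // e64, m4, tu, mu
  printf("vtype immediates decode as expected\n");
  return 0;
}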


        


More information about the llvm-commits mailing list