[llvm-branch-commits] [llvm] e5e3290 - [RISCV] Implement vlsseg intrinsics.

Hsiangkai Wang via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Wed Jan 20 19:57:32 PST 2021


Author: Hsiangkai Wang
Date: 2021-01-21T11:51:35+08:00
New Revision: e5e329023bb119631e7a756b47598cb0ce9cea5f

URL: https://github.com/llvm/llvm-project/commit/e5e329023bb119631e7a756b47598cb0ce9cea5f
DIFF: https://github.com/llvm/llvm-project/commit/e5e329023bb119631e7a756b47598cb0ce9cea5f.diff

LOG: [RISCV] Implement vlsseg intrinsics.

Define vlsseg intrinsics and pseudo instructions. Lower vlsseg intrinsics
to pseudo instructions in RISCVDAGToDAGISel.
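
As a minimal sketch of the unmasked strided form (mirroring the intrinsic
declarations in the new tests; the nxv2i32 element type and the @sketch
wrapper are illustrative only), the operands are base pointer, byte stride,
and vl:

  declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32*, i32, i32)

  define <vscale x 2 x i32> @sketch(i32* %base, i32 %stride, i32 %vl) {
  entry:
    ; Loads NF=2 fields per segment; consecutive segments are %stride bytes apart.
    %r = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i32 %stride, i32 %vl)
    %seg0 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %r, 0
    ret <vscale x 2 x i32> %seg0
  }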

Differential Revision: https://reviews.llvm.org/D94763

Added: 
    llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 981bf43c1eb9..db74ad6a6e1f 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -502,6 +502,25 @@ let TargetPrefix = "riscv" in {
                                  llvm_anyint_ty]),
                     [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
 
+  // For strided segment load
+  // Input: (pointer, offset, vl)
+  class RISCVSSegLoad<int nf>
+        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
+                                !add(nf, -1))),
+                    [LLVMPointerToElt<0>, llvm_anyint_ty, LLVMMatchType<1>],
+                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For strided segment load with mask
+  // Input: (maskedoff, pointer, offset, mask, vl)
+  class RISCVSSegLoadMask<int nf>
+        : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
+                                !add(nf, -1))),
+                    !listconcat(!listsplat(LLVMMatchType<0>, nf),
+                                [LLVMPointerToElt<0>,
+                                 llvm_anyint_ty,
+                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                 LLVMMatchType<1>]),
+                    [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
+
   // For unit stride segment store
   // Input: (value, pointer, vl)
   class RISCVUSSegStore<int nf>
@@ -627,6 +646,10 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
   }
+  multiclass RISCVSSegLoad<int nf> {
+    def "int_riscv_" # NAME : RISCVSSegLoad<nf>;
+    def "int_riscv_" # NAME # "_mask" : RISCVSSegLoadMask<nf>;
+  }
   multiclass RISCVUSSegStore<int nf> {
     def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
@@ -924,6 +947,7 @@ let TargetPrefix = "riscv" in {
 
   foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
     defm vlseg # nf : RISCVUSSegLoad<nf>;
+    defm vlsseg # nf : RISCVSSegLoad<nf>;
     defm vsseg # nf : RISCVUSSegStore<nf>;
   }
 

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index da8a073193e8..f1b3cf711c52 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -149,7 +149,8 @@ static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
   }
 }
 
-void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned IntNo) {
+void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned IntNo,
+                                    bool IsStrided) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumValues() - 1;
   EVT VT = Node->getValueType(0);
@@ -157,9 +158,16 @@ void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned IntNo) {
   MVT XLenVT = Subtarget->getXLenVT();
   RISCVVLMUL LMUL = getLMUL(VT);
   SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
-  SDValue Operands[] = {Node->getOperand(2),       // Base pointer.
-                        Node->getOperand(3),       // VL.
-                        SEW, Node->getOperand(0)}; // Chain
+  SmallVector<SDValue, 5> Operands;
+  Operands.push_back(Node->getOperand(2)); // Base pointer.
+  if (IsStrided) {
+    Operands.push_back(Node->getOperand(3)); // Stride.
+    Operands.push_back(Node->getOperand(4)); // VL.
+  } else {
+    Operands.push_back(Node->getOperand(3)); // VL.
+  }
+  Operands.push_back(SEW);
+  Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
       IntNo, ScalarSize, static_cast<unsigned>(LMUL));
   SDNode *Load =
@@ -174,7 +182,8 @@ void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned IntNo) {
   CurDAG->RemoveDeadNode(Node);
 }
 
-void RISCVDAGToDAGISel::selectVLSEGMask(SDNode *Node, unsigned IntNo) {
+void RISCVDAGToDAGISel::selectVLSEGMask(SDNode *Node, unsigned IntNo,
+                                        bool IsStrided) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumValues() - 1;
   EVT VT = Node->getValueType(0);
@@ -184,12 +193,19 @@ void RISCVDAGToDAGISel::selectVLSEGMask(SDNode *Node, unsigned IntNo) {
   SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
   SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
-  SDValue Operands[] = {MaskedOff,
-                        Node->getOperand(NF + 2), // Base pointer.
-                        Node->getOperand(NF + 3), // Mask.
-                        Node->getOperand(NF + 4), // VL.
-                        SEW,
-                        Node->getOperand(0)}; // Chain.
+  SmallVector<SDValue, 7> Operands;
+  Operands.push_back(MaskedOff);
+  Operands.push_back(Node->getOperand(NF + 2)); // Base pointer.
+  if (IsStrided) {
+    Operands.push_back(Node->getOperand(NF + 3)); // Stride.
+    Operands.push_back(Node->getOperand(NF + 4)); // Mask.
+    Operands.push_back(Node->getOperand(NF + 5)); // VL.
+  } else {
+    Operands.push_back(Node->getOperand(NF + 3)); // Mask.
+    Operands.push_back(Node->getOperand(NF + 4)); // VL.
+  }
+  Operands.push_back(SEW);
+  Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
       IntNo, ScalarSize, static_cast<unsigned>(LMUL));
   SDNode *Load =
@@ -377,7 +393,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     case Intrinsic::riscv_vlseg6:
     case Intrinsic::riscv_vlseg7:
     case Intrinsic::riscv_vlseg8: {
-      selectVLSEG(Node, IntNo);
+      selectVLSEG(Node, IntNo, /*IsStrided=*/false);
       return;
     }
     case Intrinsic::riscv_vlseg2_mask:
@@ -387,7 +403,27 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     case Intrinsic::riscv_vlseg6_mask:
     case Intrinsic::riscv_vlseg7_mask:
     case Intrinsic::riscv_vlseg8_mask: {
-      selectVLSEGMask(Node, IntNo);
+      selectVLSEGMask(Node, IntNo, /*IsStrided=*/false);
+      return;
+    }
+    case Intrinsic::riscv_vlsseg2:
+    case Intrinsic::riscv_vlsseg3:
+    case Intrinsic::riscv_vlsseg4:
+    case Intrinsic::riscv_vlsseg5:
+    case Intrinsic::riscv_vlsseg6:
+    case Intrinsic::riscv_vlsseg7:
+    case Intrinsic::riscv_vlsseg8: {
+      selectVLSEG(Node, IntNo, /*IsStrided=*/true);
+      return;
+    }
+    case Intrinsic::riscv_vlsseg2_mask:
+    case Intrinsic::riscv_vlsseg3_mask:
+    case Intrinsic::riscv_vlsseg4_mask:
+    case Intrinsic::riscv_vlsseg5_mask:
+    case Intrinsic::riscv_vlsseg6_mask:
+    case Intrinsic::riscv_vlsseg7_mask:
+    case Intrinsic::riscv_vlsseg8_mask: {
+      selectVLSEGMask(Node, IntNo, /*IsStrided=*/true);
       return;
     }
     }
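
At the SDNode level the masked strided intrinsic carries: chain (operand 0),
intrinsic ID (operand 1), NF maskedoff values (operands 2..NF+1), then base
pointer, stride, mask and VL (operands NF+2..NF+5) -- the layout the indices
above unpack. A sketch with NF = 2, using the declaration from the new tests
(the @masked_sketch wrapper is illustrative only):

  declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)

  define <vscale x 2 x i32> @masked_sketch(<vscale x 2 x i32> %m0, <vscale x 2 x i32> %m1, i32* %base, i32 %stride, <vscale x 2 x i1> %mask, i32 %vl) {
  entry:
    ; Call operands: maskedoff x NF, base, stride, mask, vl.
    %r = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32> %m0, <vscale x 2 x i32> %m1, i32* %base, i32 %stride, <vscale x 2 x i1> %mask, i32 %vl)
    %seg1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %r, 1
    ret <vscale x 2 x i32> %seg1
  }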

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 3fa39595f276..8215abbe60e9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -55,8 +55,8 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
   bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
   bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);
 
-  void selectVLSEG(SDNode *Node, unsigned IntNo);
-  void selectVLSEGMask(SDNode *Node, unsigned IntNo);
+  void selectVLSEG(SDNode *Node, unsigned IntNo, bool IsStrided);
+  void selectVLSEGMask(SDNode *Node, unsigned IntNo, bool IsStrided);
   void selectVSSEG(SDNode *Node, unsigned IntNo);
   void selectVSSEGMask(SDNode *Node, unsigned IntNo);
 

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 0fadeb751056..71bb8562bedd 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -439,7 +439,8 @@ class PseudoToVInst<string PseudoInst> {
 
 class ToLowerCase<string Upper> {
   string L = !subst("VLSEG", "vlseg",
-             !subst("VSSEG", "vsseg", Upper));
+             !subst("VLSSEG", "vlsseg",
+             !subst("VSSEG", "vsseg", Upper)));
 }
 
 // Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
@@ -1009,6 +1010,40 @@ class VPseudoUSSegLoadMask<VReg RetClass, bits<11> EEW>:
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoSSegLoadNoMask<VReg RetClass, bits<11> EEW>:
+      Pseudo<(outs RetClass:$rd),
+             (ins GPR:$rs1, GPR:$offset, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoSSegLoadMask<VReg RetClass, bits<11> EEW>:
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+             (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
+                  GPR:$offset, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = "$rd = $merge";
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasMergeOp = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoUSSegStoreNoMask<VReg ValClass, bits<11> EEW>:
       Pseudo<(outs),
              (ins ValClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
@@ -1564,6 +1599,21 @@ multiclass VPseudoUSSegLoad {
   }
 }
 
+multiclass VPseudoSSegLoad {
+  foreach eew = EEWList in {
+    foreach lmul = MxSet<eew>.m in {
+      defvar LInfo = lmul.MX;
+      let VLMul = lmul.value in {
+        foreach nf = NFSet<lmul>.L in {
+          defvar vreg = SegRegClass<lmul, nf>.RC;
+          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegLoadNoMask<vreg, eew>;
+          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegLoadMask<vreg, eew>;
+        }
+      }
+    }
+  }
+}
+
 multiclass VPseudoUSSegStore {
   foreach eew = EEWList in {
     foreach lmul = MxSet<eew>.m in {
@@ -2778,6 +2828,7 @@ foreach eew = EEWList in {
 // 7.8. Vector Load/Store Segment Instructions
 //===----------------------------------------------------------------------===//
 defm PseudoVLSEG : VPseudoUSSegLoad;
+defm PseudoVLSSEG : VPseudoSSegLoad;
 defm PseudoVSSEG : VPseudoUSSegStore;
 
 //===----------------------------------------------------------------------===//
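
As with PseudoVLSEG, the generated pseudo names encode NF, EEW and LMUL. For
example, the <vscale x 2 x i32> strided case in the new tests (e32, LMUL=1)
selects PseudoVLSSEG2E32_V_M1, which prints as (from the CHECK lines in
vlsseg-rv32.ll below):

  vlsseg2e32.v v15, (a0), a1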

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll
new file mode 100644
index 000000000000..d1851668e482
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll
@@ -0,0 +1,4722 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16*, i32, i32)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, i32, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i16> @test_vlsseg2_nxv16i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(i16* %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg2_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg2_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg3_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg3_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg4_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg4_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg5_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg5_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg6_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg6_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg7_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg7_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8*, i32, i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlsseg8_nxv1i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg8_mask_nxv1i8(i8* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8*, i32, i32)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i32, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @test_vlsseg2_nxv16i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlsseg2_mask_nxv16i8(i8* %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8*, i32, i32)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i32, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @test_vlsseg3_nxv16i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlsseg3_mask_nxv16i8(i8* %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8*, i32, i32)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i32, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @test_vlsseg4_nxv16i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlsseg4_mask_nxv16i8(i8* %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32*, i32, i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlsseg2_nxv2i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg2_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32*, i32, i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlsseg3_nxv2i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg3_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32*, i32, i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlsseg4_nxv2i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg4_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32*, i32, i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlsseg5_nxv2i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg5_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32*, i32, i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlsseg6_nxv2i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg6_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32*, i32, i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlsseg7_nxv2i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg7_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32*, i32, i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlsseg8_nxv2i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg8_mask_nxv2i32(i32* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16*, i32, i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlsseg2_nxv4i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg2_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16*, i32, i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlsseg3_nxv4i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg3_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16*, i32, i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlsseg4_nxv4i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg4_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16*, i32, i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlsseg5_nxv4i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg5_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16*, i32, i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlsseg6_nxv4i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg6_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16*, i32, i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlsseg7_nxv4i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg7_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16*, i32, i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlsseg8_nxv4i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg8_mask_nxv4i16(i16* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(i32*, i32, i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlsseg2_nxv1i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg2_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(i32*, i32, i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlsseg3_nxv1i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg3_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(i32*, i32, i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlsseg4_nxv1i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg4_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(i32*, i32, i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlsseg5_nxv1i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg5_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(i32*, i32, i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlsseg6_nxv1i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg6_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(i32*, i32, i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlsseg7_nxv1i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg7_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(i32*, i32, i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlsseg8_nxv1i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg8_mask_nxv1i32(i32* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(i16*, i32, i32)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @test_vlsseg2_nxv8i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlsseg2_mask_nxv8i16(i16* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(i16*, i32, i32)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @test_vlsseg3_nxv8i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlsseg3_mask_nxv8i16(i16* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(i16*, i32, i32)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @test_vlsseg4_nxv8i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlsseg4_mask_nxv8i16(i16* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8*, i32, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlsseg2_nxv8i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg2_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8*, i32, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlsseg3_nxv8i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg3_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8*, i32, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlsseg4_nxv8i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg4_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8*, i32, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlsseg5_nxv8i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg5_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8*, i32, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlsseg6_nxv8i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg6_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8*, i32, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlsseg7_nxv8i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg7_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8*, i32, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlsseg8_nxv8i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg8_mask_nxv8i8(i8* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32*, i32, i32)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @test_vlsseg2_nxv8i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+  ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @test_vlsseg2_mask_nxv8i32(i32* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
+  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
+  ret <vscale x 8 x i32> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg2_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg2_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg3_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg3_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg4_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg4_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg5_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg5_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg6_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg6_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg7_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg7_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8*, i32, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlsseg8_nxv4i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg8_mask_nxv4i8(i8* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg2_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg2_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg3_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg3_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg4_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg4_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg5_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg5_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg6_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg6_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg7_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg7_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16*, i32, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlsseg8_nxv1i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg8_mask_nxv1i16(i16* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8*, i32, i32)
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, i32, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @test_vlsseg2_nxv32i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
+  ret <vscale x 32 x i8> %1
+}
+
+define <vscale x 32 x i8> @test_vlsseg2_mask_nxv32i8(i8* %base, i32 %offset, i32 %vl, <vscale x 32 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e8,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
+  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, i32 %offset, <vscale x 32 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
+  ret <vscale x 32 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg2_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg2_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg3_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg3_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg4_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg4_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg5_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg5_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg6_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg6_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg7_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg7_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8*, i32, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlsseg8_nxv2i8(i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg8_mask_nxv2i8(i8* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg2_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg2_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg3_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg3_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg4_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg4_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg5_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg5_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg6_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg6_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg7_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg7_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16*, i32, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlsseg8_nxv2i16(i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg8_mask_nxv2i16(i16* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
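+; nxv4i32 uses SEW=32 with LMUL=2, so each segment field occupies an m2
+; register group and the merge copies become vmv2r.v over even-numbered
+; pairs (v14m2, v16m2, ...).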
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32*, i32, i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @test_vlsseg2_nxv4i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlsseg2_mask_nxv4i32(i32* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+  ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32*, i32, i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @test_vlsseg3_nxv4i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlsseg3_mask_nxv4i32(i32* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+  ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32*, i32, i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @test_vlsseg4_nxv4i32(i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlsseg4_mask_nxv4i32(i32* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+  ret <vscale x 4 x i32> %3
+}
+
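+; nxv16f16 raises the group size to LMUL=4 (m4 fields, vmv4r.v copies);
+; since NFIELDS*LMUL may not exceed 8 vector registers, only the two-field
+; vlsseg2 form is exercised at this LMUL.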
+declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half*, i32, i32)
+declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half*, i32, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x half> @test_vlsseg2_nxv16f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
+  ret <vscale x 16 x half> %1
+}
+
+define <vscale x 16 x half> @test_vlsseg2_mask_nxv16f16(half* %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
+  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.mask.nxv16f16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
+  ret <vscale x 16 x half> %3
+}
+
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double*, i32, i32)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x double> @test_vlsseg2_nxv4f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+  ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> @test_vlsseg2_mask_nxv4f64(double* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e64.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
+  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.mask.nxv4f64(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
+  ret <vscale x 4 x double> %3
+}
+
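+; nxv1f64 returns to LMUL=1, so the full vlsseg2 through vlsseg8 ladder is
+; exercised again with single registers v15 through v22.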
+declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double*, i32, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlsseg2_nxv1f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg2_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double*, i32, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlsseg3_nxv1f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg3_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double*, i32, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlsseg4_nxv1f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg4_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double*, i32, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlsseg5_nxv1f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg5_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(double*, i32, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlsseg6_nxv1f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg6_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(double*, i32, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlsseg7_nxv1f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg7_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(double*, i32, i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlsseg8_nxv1f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg8_mask_nxv1f64(double* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(float*, i32, i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlsseg2_nxv2f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg2_mask_nxv2f32(float* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(float*, i32, i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlsseg3_nxv2f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg3_mask_nxv2f32(float* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(float*, i32, i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlsseg4_nxv2f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg4_mask_nxv2f32(float* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(float*, i32, i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlsseg5_nxv2f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg5_mask_nxv2f32(float* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(float*, i32, i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlsseg6_nxv2f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg6_mask_nxv2f32(float* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(float*, i32, i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlsseg7_nxv2f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg7_mask_nxv2f32(float* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(float*, i32, i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlsseg8_nxv2f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg8_mask_nxv2f32(float* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(half*, i32, i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlsseg2_nxv1f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg2_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(half*, i32, i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlsseg3_nxv1f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg3_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(half*, i32, i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlsseg4_nxv1f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg4_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(half*, i32, i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlsseg5_nxv1f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg5_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(half*, i32, i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlsseg6_nxv1f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg6_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(half*, i32, i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlsseg7_nxv1f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg7_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(half*, i32, i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlsseg8_nxv1f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg8_mask_nxv1f16(half* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(float*, i32, i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlsseg2_nxv1f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg2_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(float*, i32, i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlsseg3_nxv1f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg3_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(float*, i32, i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlsseg4_nxv1f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg4_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(float*, i32, i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlsseg5_nxv1f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg5_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(float*, i32, i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlsseg6_nxv1f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg6_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(float*, i32, i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlsseg7_nxv1f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg7_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float*, i32, i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlsseg8_nxv1f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg8_mask_nxv1f32(float* %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half*, i32, i32)
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x half> @test_vlsseg2_nxv8f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlsseg2_mask_nxv8f16(half* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half*, i32, i32)
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x half> @test_vlsseg3_nxv8f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlsseg3_mask_nxv8f16(half* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half*, i32, i32)
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x half> @test_vlsseg4_nxv8f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlsseg4_mask_nxv8f16(half* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float*, i32, i32)
+declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float*, i32, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x float> @test_vlsseg2_nxv8f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
+  ret <vscale x 8 x float> %1
+}
+
+define <vscale x 8 x float> @test_vlsseg2_mask_nxv8f32(float* %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
+  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.mask.nxv8f32(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
+  ret <vscale x 8 x float> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double*, i32, i32)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @test_vlsseg2_nxv2f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlsseg2_mask_nxv2f64(double* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e64.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(double*, i32, i32)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @test_vlsseg3_nxv2f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlsseg3_mask_nxv2f64(double* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e64.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double*, i32, i32)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @test_vlsseg4_nxv2f64(double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlsseg4_mask_nxv2f64(double* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e64.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half*, i32, i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlsseg2_nxv4f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg2_mask_nxv4f16(half* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half*, i32, i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlsseg3_nxv4f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg3_mask_nxv4f16(half* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half*, i32, i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlsseg4_nxv4f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg4_mask_nxv4f16(half* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half*, i32, i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlsseg5_nxv4f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg5_mask_nxv4f16(half* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half*, i32, i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlsseg6_nxv4f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg6_mask_nxv4f16(half* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half*, i32, i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlsseg7_nxv4f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg7_mask_nxv4f16(half* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half*, i32, i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlsseg8_nxv4f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg8_mask_nxv4f16(half* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half*, i32, i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlsseg2_nxv2f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg2_mask_nxv2f16(half* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half*, i32, i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlsseg3_nxv2f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg3_mask_nxv2f16(half* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half*, i32, i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlsseg4_nxv2f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg4_mask_nxv2f16(half* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half*, i32, i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlsseg5_nxv2f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg5_mask_nxv2f16(half* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half*, i32, i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlsseg6_nxv2f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg6_mask_nxv2f16(half* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half*, i32, i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlsseg7_nxv2f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg7_mask_nxv2f16(half* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half*, i32, i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlsseg8_nxv2f16(half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg8_mask_nxv2f16(half* %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float*, i32, i32)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x float> @test_vlsseg2_nxv4f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlsseg2_mask_nxv4f32(float* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float*, i32, i32)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x float> @test_vlsseg3_nxv4f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlsseg3_mask_nxv4f32(float* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float*, i32, i32)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i32, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x float> @test_vlsseg4_nxv4f32(float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlsseg4_mask_nxv4f32(float* %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i32 %offset, i32 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
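
The declarations in the file above fix the operand order these tests exercise: the unmasked intrinsics take (base pointer, byte stride between segments, vl) and return all nf fields in one struct, while the masked variants prepend one maskedoff operand per field and insert the mask between the stride and vl. Below is a minimal, illustrative IR sketch — not part of the commit; the function and value names are invented, and it reuses the nxv4f32 declarations from the rv32 file verbatim:

declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float*, i32, i32)
declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, i32, <vscale x 4 x i1>, i32)

define <vscale x 4 x float> @example_vlsseg2_use(float* %base, i32 %stride, i32 %vl, <vscale x 4 x i1> %mask) {
entry:
  ; Field i of segment j lives at %base + j*%stride + i*sizeof(float); the
  ; stride is in bytes and becomes the rs2 operand of vlsseg2e32.v.
  %ld = call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i32 %stride, i32 %vl)
  %f0 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %ld, 0
  %f1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %ld, 1
  ; Masked form: the maskedoff values for both fields come first; elements
  ; whose mask bit is clear keep those values.
  %mld = call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float> %f0, <vscale x 4 x float> %f1, float* %base, i32 %stride, <vscale x 4 x i1> %mask, i32 %vl)
  %g0 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %mld, 0
  %g1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %mld, 1
  %sum = fadd <vscale x 4 x float> %g0, %g1
  ret <vscale x 4 x float> %sum
}

This also explains the pair of vsetvli instructions in the masked CHECK lines above: the maskedoff operands require the masked pseudo to run tail-undisturbed (tu,mu), whereas the plain loads can use ta,mu.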

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll
new file mode 100644
index 000000000000..2c8f2e81199c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll
@@ -0,0 +1,5120 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16*, i64, i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, i64, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlsseg2_nxv16i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+  ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(i16* %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+  ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32*, i64, i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i32> @test_vlsseg2_nxv4i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlsseg2_mask_nxv4i32(i32* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+  ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32*, i64, i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i32> @test_vlsseg3_nxv4i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlsseg3_mask_nxv4i32(i32* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+  ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32*, i64, i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i32> @test_vlsseg4_nxv4i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+  ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlsseg4_mask_nxv4i32(i32* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+  %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+  ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8*, i64, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i64, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlsseg2_nxv16i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlsseg2_mask_nxv16i8(i8* %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8*, i64, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i64, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlsseg3_nxv16i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlsseg3_mask_nxv16i8(i8* %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8*, i64, i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i64, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlsseg4_nxv16i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+  ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlsseg4_mask_nxv16i8(i8* %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+  ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(i64*, i64, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlsseg2_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlsseg2_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(i64*, i64, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlsseg3_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlsseg3_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(i64*, i64, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlsseg4_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlsseg4_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(i64*, i64, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlsseg5_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlsseg5_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(i64*, i64, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlsseg6_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlsseg6_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(i64*, i64, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlsseg7_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlsseg7_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(i64*, i64, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlsseg8_nxv1i64(i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+  ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlsseg8_mask_nxv1i64(i64* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+  %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+  ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(i32*, i64, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlsseg2_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg2_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(i32*, i64, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlsseg3_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg3_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(i32*, i64, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlsseg4_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg4_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(i32*, i64, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlsseg5_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg5_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(i32*, i64, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlsseg6_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg6_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(i32*, i64, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlsseg7_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg7_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(i32*, i64, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlsseg8_nxv1i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+  ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlsseg8_mask_nxv1i32(i32* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+  %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+  ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(i16*, i64, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i16> @test_vlsseg2_nxv8i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlsseg2_mask_nxv8i16(i16* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(i16*, i64, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i16> @test_vlsseg3_nxv8i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlsseg3_mask_nxv8i16(i16* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(i16*, i64, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i16> @test_vlsseg4_nxv8i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+  ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlsseg4_mask_nxv8i16(i16* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+  %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+  ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8*, i64, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlsseg2_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg2_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8*, i64, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlsseg3_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg3_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8*, i64, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlsseg4_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg4_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8*, i64, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlsseg5_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg5_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8*, i64, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlsseg6_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg6_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8*, i64, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlsseg7_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg7_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8*, i64, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlsseg8_nxv4i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+  ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlsseg8_mask_nxv4i8(i8* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+  %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+  ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16*, i64, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlsseg2_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg2_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16*, i64, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlsseg3_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg3_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16*, i64, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlsseg4_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg4_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16*, i64, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlsseg5_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg5_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16*, i64, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlsseg6_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg6_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16*, i64, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlsseg7_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg7_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16*, i64, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlsseg8_nxv1i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+  ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlsseg8_mask_nxv1i16(i16* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+  %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+  ret <vscale x 1 x i16> %3
+}
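+; Note: at fractional LMUL (e16,mf4 here) each segment still occupies a full
+; vector register, so vlsseg8 uses the same eight consecutive registers
+; v15-v22 as the m1 cases.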
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg2_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg2_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg3_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg3_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg4_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg4_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg5_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg5_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg6_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg6_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg7_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg7_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32*, i64, i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlsseg8_nxv2i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+  ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlsseg8_mask_nxv2i32(i32* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+  ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg2_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg2_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg3_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg3_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg4_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg4_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg5_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg5_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg6_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg6_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg7_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg7_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8*, i64, i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlsseg8_nxv8i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+  ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlsseg8_mask_nxv8i8(i8* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+  %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+  ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(i64*, i64, i64)
+declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i64> @test_vlsseg2_nxv4i64(i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 1
+  ret <vscale x 4 x i64> %1
+}
+
+define <vscale x 4 x i64> @test_vlsseg2_mask_nxv4i64(i64* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e64.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 0
+  %2 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.mask.nxv4i64(<vscale x 4 x i64> %1,<vscale x 4 x i64> %1, i64* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %2, 1
+  ret <vscale x 4 x i64> %3
+}
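+; Note: nxv4i64 forces e64,m4, so each segment is a four-register group; the
+; tuple becomes v12m4_v16m4 and the merge-operand copy uses vmv4r.v instead
+; of vmv1r.v.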
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg2_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg2_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg3_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg3_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg4_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg4_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg5_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg5_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg6_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg6_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg7_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg7_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16*, i64, i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlsseg8_nxv4i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+  ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlsseg8_mask_nxv4i16(i16* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+  %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+  ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg2_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg2_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg3_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg3_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg4_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg4_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg5_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg5_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg6_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg6_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg7_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg7_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8*, i64, i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlsseg8_nxv1i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+  ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlsseg8_mask_nxv1i8(i8* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,tu,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+  ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg2_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg2_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg3_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg3_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg3e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg4_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg4_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg4e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg5_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg5_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg5e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg6_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg6_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg6e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg7_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg7_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg7e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8*, i64, i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlsseg8_nxv2i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+  ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlsseg8_mask_nxv2i8(i8* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,tu,mu
+; CHECK-NEXT:    vlsseg8e8.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+  %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+  ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32*, i64, i64)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i32> @test_vlsseg2_nxv8i32(i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+  ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @test_vlsseg2_mask_nxv8i32(i32* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
+  %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
+  ret <vscale x 8 x i32> %3
+}
+
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8*, i64, i64)
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, i64, <vscale x 32 x i1>, i64)
+
+define <vscale x 32 x i8> @test_vlsseg2_nxv32i8(i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
+  ret <vscale x 32 x i8> %1
+}
+
+define <vscale x 32 x i8> @test_vlsseg2_mask_nxv32i8(i8* %base, i64 %offset, i64 %vl, <vscale x 32 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e8.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e8,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e8.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
+  %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, i64 %offset, <vscale x 32 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
+  ret <vscale x 32 x i8> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16*, i64, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlsseg2_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg2_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16*, i64, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlsseg3_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg3_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16*, i64, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlsseg4_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg4_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16*, i64, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlsseg5_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg5_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16*, i64, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlsseg6_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg6_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16*, i64, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlsseg7_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg7_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16*, i64, i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlsseg8_nxv2i16(i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+  ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlsseg8_mask_nxv2i16(i16* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+  %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+  ret <vscale x 2 x i16> %3
+}
+
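+; nxv2i64 requires LMUL=2, so the fields step by register pairs (v14m2,
+; v16m2, v18m2, v20m2) and only vlsseg2 through vlsseg4 are tested: the V
+; extension requires EMUL * NFIELDS <= 8 for segment loads and stores.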
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(i64*, i64, i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i64> @test_vlsseg2_nxv2i64(i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
+  ret <vscale x 2 x i64> %1
+}
+
+define <vscale x 2 x i64> @test_vlsseg2_mask_nxv2i64(i64* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e64.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
+  %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.mask.nxv2i64(<vscale x 2 x i64> %1,<vscale x 2 x i64> %1, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
+  ret <vscale x 2 x i64> %3
+}
+
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(i64*, i64, i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i64> @test_vlsseg3_nxv2i64(i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
+  ret <vscale x 2 x i64> %1
+}
+
+define <vscale x 2 x i64> @test_vlsseg3_mask_nxv2i64(i64* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e64.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
+  %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.mask.nxv2i64(<vscale x 2 x i64> %1,<vscale x 2 x i64> %1,<vscale x 2 x i64> %1, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
+  ret <vscale x 2 x i64> %3
+}
+
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(i64*, i64, i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i64> @test_vlsseg4_nxv2i64(i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
+  ret <vscale x 2 x i64> %1
+}
+
+define <vscale x 2 x i64> @test_vlsseg4_mask_nxv2i64(i64* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e64.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(i64* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
+  %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.mask.nxv2i64(<vscale x 2 x i64> %1,<vscale x 2 x i64> %1,<vscale x 2 x i64> %1,<vscale x 2 x i64> %1, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
+  ret <vscale x 2 x i64> %3
+}
+
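+; nxv16f16 and nxv4f64 both require LMUL=4, so each field is a four-register
+; group (v12m4, v16m4) and vlsseg2 is the only legal segment width here
+; (2 * 4 = 8, the EMUL * NFIELDS limit).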
+declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half*, i64, i64)
+declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half*, i64, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x half> @test_vlsseg2_nxv16f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
+  ret <vscale x 16 x half> %1
+}
+
+define <vscale x 16 x half> @test_vlsseg2_mask_nxv16f16(half* %base, i64 %offset, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
+  %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.mask.nxv16f16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
+  ret <vscale x 16 x half> %3
+}
+
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double*, i64, i64)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x double> @test_vlsseg2_nxv4f64(double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+  ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> @test_vlsseg2_mask_nxv4f64(double* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e64.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
+  %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.mask.nxv4f64(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
+  ret <vscale x 4 x double> %3
+}
+
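+; nxv1f64 and nxv2f32 both map to LMUL=1, so the fields again occupy single
+; registers (v15 through v22) and vlsseg2 through vlsseg8 are all exercised.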
+declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double*, i64, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlsseg2_nxv1f64(double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg2_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double*, i64, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlsseg3_nxv1f64(double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg3_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double*, i64, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlsseg4_nxv1f64(double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg4_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double*, i64, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlsseg5_nxv1f64(double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg5_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(double*, i64, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlsseg6_nxv1f64(double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg6_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(double*, i64, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlsseg7_nxv1f64(double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg7_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(double*, i64, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlsseg8_nxv1f64(double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e64.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+  ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlsseg8_mask_nxv1f64(double* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e64.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e64.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+  %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+  ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(float*, i64, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlsseg2_nxv2f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg2_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(float*, i64, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlsseg3_nxv2f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg3_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(float*, i64, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlsseg4_nxv2f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg4_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(float*, i64, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlsseg5_nxv2f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg5_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(float*, i64, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlsseg6_nxv2f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg6_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(float*, i64, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlsseg7_nxv2f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg7_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(float*, i64, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlsseg8_nxv2f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+  ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlsseg8_mask_nxv2f32(float* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+  %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+  ret <vscale x 2 x float> %3
+}
+
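+; Strided segment loads of <vscale x 1 x half> elements (e16, mf4): nf = 2 through 8,
+; each with an unmasked and a masked variant.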
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(half*, i64, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlsseg2_nxv1f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg2_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(half*, i64, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlsseg3_nxv1f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg3_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(half*, i64, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlsseg4_nxv1f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg4_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(half*, i64, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlsseg5_nxv1f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg5_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(half*, i64, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlsseg6_nxv1f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg6_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(half*, i64, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlsseg7_nxv1f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg7_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(half*, i64, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlsseg8_nxv1f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+  ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlsseg8_mask_nxv1f16(half* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+  %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+  ret <vscale x 1 x half> %3
+}
+
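+; Strided segment loads of <vscale x 1 x float> elements (e32, mf2): nf = 2 through 8.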
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(float*, i64, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlsseg2_nxv1f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg2_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(float*, i64, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlsseg3_nxv1f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg3_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(float*, i64, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlsseg4_nxv1f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg4_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(float*, i64, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlsseg5_nxv1f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg5_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg5e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(float*, i64, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlsseg6_nxv1f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg6_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg6e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(float*, i64, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlsseg7_nxv1f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg7_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg7e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float*, i64, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlsseg8_nxv1f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+  ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlsseg8_mask_nxv1f32(float* %base, i64 %offset, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e32.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+  %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+  ret <vscale x 1 x float> %3
+}
+
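+; Strided segment loads of <vscale x 8 x half> elements (e16, m2): nf = 2 through 4,
+; since nf * LMUL may not exceed 8 vector registers.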
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half*, i64, i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlsseg2_nxv8f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlsseg2_mask_nxv8f16(half* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half*, i64, i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlsseg3_nxv8f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlsseg3_mask_nxv8f16(half* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half*, i64, i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlsseg4_nxv8f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+  ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlsseg4_mask_nxv8f16(half* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+  %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+  ret <vscale x 8 x half> %3
+}
+
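+; Strided segment loads of <vscale x 8 x float> elements (e32, m4): only nf = 2 fits
+; within the nf * LMUL <= 8 register-group limit.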
+declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float*, i64, i64)
+declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float*, i64, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x float> @test_vlsseg2_nxv8f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
+  ret <vscale x 8 x float> %1
+}
+
+define <vscale x 8 x float> @test_vlsseg2_mask_nxv8f32(float* %base, i64 %offset, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v12, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
+  %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.mask.nxv8f32(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
+  ret <vscale x 8 x float> %3
+}
+
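+; Strided segment loads of <vscale x 2 x double> elements (e64, m2): nf = 2 through 4.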
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double*, i64, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlsseg2_nxv2f64(double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlsseg2_mask_nxv2f64(double* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e64.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e64.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(double*, i64, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlsseg3_nxv2f64(double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlsseg3_mask_nxv2f64(double* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e64.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e64.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double*, i64, i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlsseg4_nxv2f64(double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+  ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlsseg4_mask_nxv2f64(double* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e64.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e64.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+  %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+  ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg2_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg2_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg3_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg3_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg4_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg4_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg5_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg5_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg6_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg6_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg7_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg7_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half*, i64, i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlsseg8_nxv4f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+  ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlsseg8_mask_nxv4f16(half* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+  %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+  ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg2_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg2_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg2e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg3_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg3_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg3e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg4_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg4_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg4e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg5_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg5_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg5_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg5_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg5e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg6_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg6_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg6_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg6_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg6e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg7_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg7_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg7_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg7_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg7e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half*, i64, i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlsseg8_nxv2f16(half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg8_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+  ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlsseg8_mask_nxv2f16(half* %base, i64 %offset, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg8_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1
+; CHECK-NEXT:    vmv1r.v v16, v15
+; CHECK-NEXT:    vmv1r.v v17, v15
+; CHECK-NEXT:    vmv1r.v v18, v15
+; CHECK-NEXT:    vmv1r.v v19, v15
+; CHECK-NEXT:    vmv1r.v v20, v15
+; CHECK-NEXT:    vmv1r.v v21, v15
+; CHECK-NEXT:    vmv1r.v v22, v15
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,tu,mu
+; CHECK-NEXT:    vlsseg8e16.v v15, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+  %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+  ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float*, i64, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlsseg2_nxv4f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg2_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlsseg2_mask_nxv4f32(float* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg2_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg2e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float*, i64, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlsseg3_nxv4f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg3_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlsseg3_mask_nxv4f32(float* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg3_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg3e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float*, i64, i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlsseg4_nxv4f32(float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vlsseg4_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+  ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlsseg4_mask_nxv4f32(float* %base, i64 %offset, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlsseg4_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1
+; CHECK-NEXT:    vmv2r.v v16, v14
+; CHECK-NEXT:    vmv2r.v v18, v14
+; CHECK-NEXT:    vmv2r.v v20, v14
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,tu,mu
+; CHECK-NEXT:    vlsseg4e32.v v14, (a0), a1, v0.t
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i64 %offset, i64 %vl)
+  %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+  %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+  ret <vscale x 4 x float> %3
+}
