[llvm-branch-commits] [llvm] a8b96ea - [RISCV] Implement vssseg intrinsics.

Hsiangkai Wang via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Wed Jan 20 19:57:36 PST 2021


Author: Hsiangkai Wang
Date: 2021-01-21T11:51:35+08:00
New Revision: a8b96eadfd93f1641c72c378e33af636f463ab02

URL: https://github.com/llvm/llvm-project/commit/a8b96eadfd93f1641c72c378e33af636f463ab02
DIFF: https://github.com/llvm/llvm-project/commit/a8b96eadfd93f1641c72c378e33af636f463ab02.diff

LOG: [RISCV] Implement vssseg intrinsics.

Define vssseg intrinsics and pseudo instructions. Lower vssseg
intrinsics to pseudo instructions in RISCVDAGToDAGISel.

Differential Revision: https://reviews.llvm.org/D94863
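
As a minimal usage sketch (taken from the rv32 tests added below), a two-field
strided segment store is expressed at the IR level as a call to the new
intrinsic; the trailing scalar operands are the byte offset between segments
and the vector length:

  declare void @llvm.riscv.vssseg2.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i8*, i32, i32)

  ; Store the two segment fields (%val, %val) starting at %base, with stride
  ; %offset between consecutive segments, for %vl elements.
  tail call void @llvm.riscv.vssseg2.nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl)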

Added: 
    llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index db74ad6a6e1f..f4d7b84c00f5 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -540,6 +540,26 @@ let TargetPrefix = "riscv" in {
                                  llvm_anyint_ty]),
                     [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
 
+  // For stride segment store
+  // Input: (value, pointer, offset, vl)
+  class RISCVSSegStore<int nf>
+        : Intrinsic<[],
+                    !listconcat([llvm_anyvector_ty],
+                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
+                                [LLVMPointerToElt<0>, llvm_anyint_ty,
+                                 LLVMMatchType<1>]),
+                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For stride segment store with mask
+  // Input: (value, pointer, offset, mask, vl)
+  class RISCVSSegStoreMask<int nf>
+        : Intrinsic<[],
+                    !listconcat([llvm_anyvector_ty],
+                                !listsplat(LLVMMatchType<0>, !add(nf, -1)),
+                                [LLVMPointerToElt<0>, llvm_anyint_ty,
+                                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                 LLVMMatchType<1>]),
+                    [NoCapture<ArgIndex<nf>>, IntrWriteMem]>, RISCVVIntrinsic;
+
   multiclass RISCVUSLoad {
     def "int_riscv_" # NAME : RISCVUSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
@@ -654,6 +674,10 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVUSSegStore<nf>;
     def "int_riscv_" # NAME # "_mask" : RISCVUSSegStoreMask<nf>;
   }
+  multiclass RISCVSSegStore<int nf> {
+    def "int_riscv_" # NAME : RISCVSSegStore<nf>;
+    def "int_riscv_" # NAME # "_mask" : RISCVSSegStoreMask<nf>;
+  }
 
   defm vle : RISCVUSLoad;
   defm vleff : RISCVUSLoad;
@@ -949,6 +973,7 @@ let TargetPrefix = "riscv" in {
     defm vlseg # nf : RISCVUSSegLoad<nf>;
     defm vlsseg # nf : RISCVSSegLoad<nf>;
     defm vsseg # nf : RISCVUSSegStore<nf>;
+    defm vssseg # nf : RISCVSSegStore<nf>;
   }
 
 } // TargetPrefix = "riscv"

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index f1b3cf711c52..4c873a0482f9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -220,9 +220,12 @@ void RISCVDAGToDAGISel::selectVLSEGMask(SDNode *Node, unsigned IntNo,
   CurDAG->RemoveDeadNode(Node);
 }
 
-void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo) {
+void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo,
+                                    bool IsStrided) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumOperands() - 4;
+  if (IsStrided)
+    NF--;
   EVT VT = Node->getOperand(2)->getValueType(0);
   unsigned ScalarSize = VT.getScalarSizeInBits();
   MVT XLenVT = Subtarget->getXLenVT();
@@ -230,10 +233,17 @@ void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo) {
   SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
-  SDValue Operands[] = {StoreVal,
-                        Node->getOperand(2 + NF),  // Base pointer.
-                        Node->getOperand(3 + NF),  // VL.
-                        SEW, Node->getOperand(0)}; // Chain
+  SmallVector<SDValue, 6> Operands;
+  Operands.push_back(StoreVal);
+  Operands.push_back(Node->getOperand(2 + NF)); // Base pointer.
+  if (IsStrided) {
+    Operands.push_back(Node->getOperand(3 + NF)); // Stride.
+    Operands.push_back(Node->getOperand(4 + NF)); // VL.
+  } else {
+    Operands.push_back(Node->getOperand(3 + NF)); // VL.
+  }
+  Operands.push_back(SEW);
+  Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
       IntNo, ScalarSize, static_cast<unsigned>(LMUL));
   SDNode *Store =
@@ -241,9 +251,12 @@ void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned IntNo) {
   ReplaceNode(Node, Store);
 }
 
-void RISCVDAGToDAGISel::selectVSSEGMask(SDNode *Node, unsigned IntNo) {
+void RISCVDAGToDAGISel::selectVSSEGMask(SDNode *Node, unsigned IntNo,
+                                        bool IsStrided) {
   SDLoc DL(Node);
   unsigned NF = Node->getNumOperands() - 5;
+  if (IsStrided)
+    NF--;
   EVT VT = Node->getOperand(2)->getValueType(0);
   unsigned ScalarSize = VT.getScalarSizeInBits();
   MVT XLenVT = Subtarget->getXLenVT();
@@ -251,12 +264,19 @@ void RISCVDAGToDAGISel::selectVSSEGMask(SDNode *Node, unsigned IntNo) {
   SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
   SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
   SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);
-  SDValue Operands[] = {StoreVal,
-                        Node->getOperand(2 + NF), // Base pointer.
-                        Node->getOperand(3 + NF), // Mask.
-                        Node->getOperand(4 + NF), // VL.
-                        SEW,
-                        Node->getOperand(0)}; // Chain
+  SmallVector<SDValue, 7> Operands;
+  Operands.push_back(StoreVal);
+  Operands.push_back(Node->getOperand(2 + NF)); // Base pointer.
+  if (IsStrided) {
+    Operands.push_back(Node->getOperand(3 + NF)); // Stride.
+    Operands.push_back(Node->getOperand(4 + NF)); // Mask.
+    Operands.push_back(Node->getOperand(5 + NF)); // VL.
+  } else {
+    Operands.push_back(Node->getOperand(3 + NF)); // Mask.
+    Operands.push_back(Node->getOperand(4 + NF)); // VL.
+  }
+  Operands.push_back(SEW);
+  Operands.push_back(Node->getOperand(0)); // Chain.
   const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
       IntNo, ScalarSize, static_cast<unsigned>(LMUL));
   SDNode *Store =
@@ -439,7 +459,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     case Intrinsic::riscv_vsseg6:
     case Intrinsic::riscv_vsseg7:
     case Intrinsic::riscv_vsseg8: {
-      selectVSSEG(Node, IntNo);
+      selectVSSEG(Node, IntNo, /*IsStrided=*/false);
       return;
     }
     case Intrinsic::riscv_vsseg2_mask:
@@ -449,7 +469,27 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     case Intrinsic::riscv_vsseg6_mask:
     case Intrinsic::riscv_vsseg7_mask:
     case Intrinsic::riscv_vsseg8_mask: {
-      selectVSSEGMask(Node, IntNo);
+      selectVSSEGMask(Node, IntNo, /*IsStrided=*/false);
+      return;
+    }
+    case Intrinsic::riscv_vssseg2:
+    case Intrinsic::riscv_vssseg3:
+    case Intrinsic::riscv_vssseg4:
+    case Intrinsic::riscv_vssseg5:
+    case Intrinsic::riscv_vssseg6:
+    case Intrinsic::riscv_vssseg7:
+    case Intrinsic::riscv_vssseg8: {
+      selectVSSEG(Node, IntNo, /*IsStrided=*/true);
+      return;
+    }
+    case Intrinsic::riscv_vssseg2_mask:
+    case Intrinsic::riscv_vssseg3_mask:
+    case Intrinsic::riscv_vssseg4_mask:
+    case Intrinsic::riscv_vssseg5_mask:
+    case Intrinsic::riscv_vssseg6_mask:
+    case Intrinsic::riscv_vssseg7_mask:
+    case Intrinsic::riscv_vssseg8_mask: {
+      selectVSSEGMask(Node, IntNo, /*IsStrided=*/true);
       return;
     }
     }

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 8215abbe60e9..2069afa363fe 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -57,8 +57,8 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
 
   void selectVLSEG(SDNode *Node, unsigned IntNo, bool IsStrided);
   void selectVLSEGMask(SDNode *Node, unsigned IntNo, bool IsStrided);
-  void selectVSSEG(SDNode *Node, unsigned IntNo);
-  void selectVSSEGMask(SDNode *Node, unsigned IntNo);
+  void selectVSSEG(SDNode *Node, unsigned IntNo, bool IsStrided);
+  void selectVSSEGMask(SDNode *Node, unsigned IntNo, bool IsStrided);
 
 // Include the pieces autogenerated from the target description.
 #include "RISCVGenDAGISel.inc"

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 71bb8562bedd..1d909e6c1a39 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -440,7 +440,8 @@ class PseudoToVInst<string PseudoInst> {
 class ToLowerCase<string Upper> {
   string L = !subst("VLSEG", "vlseg",
              !subst("VLSSEG", "vlsseg",
-             !subst("VSSEG", "vsseg", Upper)));
+             !subst("VSSEG", "vsseg",
+             !subst("VSSSEG", "vssseg", Upper))));
 }
 
 // Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
@@ -1076,6 +1077,38 @@ class VPseudoUSSegStoreMask<VReg ValClass, bits<11> EEW>:
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoSSegStoreNoMask<VReg ValClass, bits<11> EEW>:
+      Pseudo<(outs),
+             (ins ValClass:$rd, GPR:$rs1, GPR: $offset, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoSSegStoreMask<VReg ValClass, bits<11> EEW>:
+      Pseudo<(outs),
+             (ins ValClass:$rd, GPR:$rs1, GPR: $offset,
+                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo,
+      RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let HasVLOp = 1;
+  let HasSEWOp = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 multiclass VPseudoUSLoad {
   foreach lmul = MxList.m in {
     defvar LInfo = lmul.MX;
@@ -1629,6 +1662,21 @@ multiclass VPseudoUSSegStore {
   }
 }
 
+multiclass VPseudoSSegStore {
+  foreach eew = EEWList in {
+    foreach lmul = MxSet<eew>.m in {
+      defvar LInfo = lmul.MX;
+      let VLMul = lmul.value in {
+        foreach nf = NFSet<lmul>.L in {
+          defvar vreg = SegRegClass<lmul, nf>.RC;
+          def nf # "E" # eew # "_V_" # LInfo : VPseudoSSegStoreNoMask<vreg, eew>;
+          def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoSSegStoreMask<vreg, eew>;
+        }
+      }
+    }
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the intrinsic patterns.
 //===----------------------------------------------------------------------===//
@@ -2830,6 +2878,7 @@ foreach eew = EEWList in {
 defm PseudoVLSEG : VPseudoUSSegLoad;
 defm PseudoVLSSEG : VPseudoSSegLoad;
 defm PseudoVSSEG : VPseudoUSSegStore;
+defm PseudoVSSSEG : VPseudoSSegStore;
 
 //===----------------------------------------------------------------------===//
 // 8. Vector AMO Operations

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll
new file mode 100644
index 000000000000..68debc96a8bc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll
@@ -0,0 +1,4410 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare void @llvm.riscv.vssseg2.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, i32, <vscale x 16 x i1>, i32)
+
+define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg2_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg3_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg4_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg5_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg6_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg7_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i32, <vscale x 16 x i1>, i32)
+
+define void @test_vssseg2_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i32, <vscale x 16 x i1>, i32)
+
+define void @test_vssseg3_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i32, <vscale x 16 x i1>, i32)
+
+define void @test_vssseg4_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg2_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg3_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg4_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg5_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg6_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg7_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg8_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg2_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg3_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg4_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg5_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg6_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg7_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg8_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg2_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg3_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg4_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg5_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg6_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg7_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg8_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg2_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg3_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg4_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg2_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg3_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg4_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg5_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg6_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg7_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg2_nxv8i32(<vscale x 8 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv8i32(<vscale x 8 x i32> %val, i32* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg2_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg3_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg4_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg5_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg6_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg7_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg2_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg3_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg4_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg5_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg6_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg7_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg8_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, i32, <vscale x 32 x i1>, i32)
+
+define void @test_vssseg2_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i32 %offset, <vscale x 32 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, i32 %offset, <vscale x 32 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg2_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg3_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg4_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg5_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg6_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg7_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg2_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg3_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg4_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg5_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg6_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg7_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg8_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg2_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg3_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg4_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half*, i32, <vscale x 16 x i1>, i32)
+
+define void @test_vssseg2_nxv16f16(<vscale x 16 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv16f16(<vscale x 16 x half> %val, half* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg2_nxv4f64(<vscale x 4 x double> %val, double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4f64(<vscale x 4 x double> %val, double* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg2_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg3_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg4_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg5_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg5e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg5e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg6_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg6e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg6e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg7_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg7e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg7e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg8_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg8e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg8e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg2_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg3_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg4_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg5_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg6_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg7_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg8_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg2_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg3_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg4_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg5_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg6_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg7_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg8_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg2_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg3_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg4_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg5_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg6_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg7_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i32, <vscale x 1 x i1>, i32)
+
+define void @test_vssseg8_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg2_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg3_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg4_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float*, i32, <vscale x 8 x i1>, i32)
+
+define void @test_vssseg2_nxv8f32(<vscale x 8 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv8f32(<vscale x 8 x float> %val, float* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg2_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg3_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg4_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg2_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg3_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg4_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg5_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg6_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg7_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg8_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg2_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg3_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg4_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg5_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg6_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg7_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, i32)
+declare void @llvm.riscv.vssseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i32, <vscale x 2 x i1>, i32)
+
+define void @test_vssseg8_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg2_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg3_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i32, i32)
+declare void @llvm.riscv.vssseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i32, <vscale x 4 x i1>, i32)
+
+define void @test_vssseg4_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %offset, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i32 %offset, i32 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl)
+  ret void
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll
new file mode 100644
index 000000000000..3ba3e0ef19cd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll
@@ -0,0 +1,4777 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN:     -verify-machineinstrs < %s | FileCheck %s
+
+declare void @llvm.riscv.vssseg2.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, i64, <vscale x 16 x i1>, i64)
+
+define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg2_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg3_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg4_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i64, <vscale x 16 x i1>, i64)
+
+define void @test_vssseg2_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i64, <vscale x 16 x i1>, i64)
+
+define void @test_vssseg3_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, i64, <vscale x 16 x i1>, i64)
+
+define void @test_vssseg4_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m2,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg2_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg3_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg4_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg5_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg5e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg5e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg6_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg6e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg6e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg7_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg7e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg7e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg8_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg8e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg8e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg2_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg3_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg4_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg5_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg6_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg7_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg8_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg2_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg3_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg4_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg2_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg3_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg4_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg5_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg6_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg7_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg2_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg3_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg4_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg5_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg6_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg7_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg8_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg2_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg3_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg4_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg5_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg6_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg7_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg8_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg2_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg3_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg4_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg5_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg6_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg7_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg2_nxv4i64(<vscale x 4 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4i64(<vscale x 4 x i64> %val, i64* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg2_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg3_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg4_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg5_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg6_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg7_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg8_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg2_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg3_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg4_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg5_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg6_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg7_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf8,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg2_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg3_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg4_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg5_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg6_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg7_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg2_nxv8i32(<vscale x 8 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv8i32(<vscale x 8 x i32> %val, i32* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, i64, <vscale x 32 x i1>, i64)
+
+define void @test_vssseg2_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i64 %offset, <vscale x 32 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e8,m4,ta,mu
+; CHECK-NEXT:    vssseg2e8.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, i64 %offset, <vscale x 32 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg2_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg3_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg4_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg5_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg6_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg7_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg8_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg2_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg3_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg4_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half*, i64, <vscale x 16 x i1>, i64)
+
+define void @test_vssseg2_nxv16f16(<vscale x 16 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv16f16(<vscale x 16 x half> %val, half* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg2_nxv4f64(<vscale x 4 x double> %val, double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4f64(<vscale x 4 x double> %val, double* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m4,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg2_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg3_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg4_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg5_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg5e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg5e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg6_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg6e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg6e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg7_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg7e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg7e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg8_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg8e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT:    vssseg8e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg2_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg3_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg4_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg5_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg6_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg7_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg8_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg2_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg3_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg4_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg5_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg6_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg7_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg8_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf4,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg2_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg3_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg4_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg5_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg6_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg7_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, i64, <vscale x 1 x i1>, i64)
+
+define void @test_vssseg8_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg2_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg3_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg4_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float*, i64, <vscale x 8 x i1>, i64)
+
+define void @test_vssseg2_nxv8f32(<vscale x 8 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv8f32(<vscale x 8 x float> %val, float* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m4 killed $v16m4 def $v16m4_v20m4
+; CHECK-NEXT:    vmv4r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m4,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg2_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg2e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg3_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg3e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg4_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT:    vssseg4e64.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg2_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg3_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg4_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg5_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg6_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg7_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg8_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg2_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg2e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg3_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg3e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg4_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg4e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg5.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg5_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg5_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg5_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg5e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg5.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg6.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg6_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg6_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg6_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg6e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg6.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg7.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg7_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg7_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg7_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg7e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg7.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg8.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, i64)
+declare void @llvm.riscv.vssseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, i64, <vscale x 2 x i1>, i64)
+
+define void @test_vssseg8_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg8_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg8_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16 killed $v16 def $v16_v17_v18_v19_v20_v21_v22_v23
+; CHECK-NEXT:    vmv1r.v v17, v16
+; CHECK-NEXT:    vmv1r.v v18, v16
+; CHECK-NEXT:    vmv1r.v v19, v16
+; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vsetvli a2, a2, e16,mf2,ta,mu
+; CHECK-NEXT:    vssseg8e16.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg8.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg2.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg2_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg2_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg2_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg2e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg2.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg3.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg3_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg3_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg3_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg3e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg3.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}
+
+declare void @llvm.riscv.vssseg4.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, i64)
+declare void @llvm.riscv.vssseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, i64, <vscale x 4 x i1>, i64)
+
+define void @test_vssseg4_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %offset, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i64 %offset, i64 %vl)
+  ret void
+}
+
+define void @test_vssseg4_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
+; CHECK-LABEL: test_vssseg4_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    # kill: def $v16m2 killed $v16m2 def $v16m2_v18m2_v20m2_v22m2
+; CHECK-NEXT:    vmv2r.v v18, v16
+; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT:    vssseg4e32.v v16, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+entry:
+  tail call void @llvm.riscv.vssseg4.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl)
+  ret void
+}

