[llvm-branch-commits] [llvm] 8ca4b17 - [RISCV] Implement vlseg intrinsics.
Hsiangkai Wang via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Jan 19 22:31:03 PST 2021
Author: Hsiangkai Wang
Date: 2021-01-20T14:26:04+08:00
New Revision: 8ca4b174d703e8676c6d47a2e25895c82e2e2ab7
URL: https://github.com/llvm/llvm-project/commit/8ca4b174d703e8676c6d47a2e25895c82e2e2ab7
DIFF: https://github.com/llvm/llvm-project/commit/8ca4b174d703e8676c6d47a2e25895c82e2e2ab7.diff
LOG: [RISCV] Implement vlseg intrinsics.
For Zvlsseg, the values need to be held in consecutive vector registers,
so we define new register classes for each combination of (number of
fields, LMUL). For example, when the number of fields (NF) = 3 and
LMUL = 2, the values will be assigned to (V0M2, V2M2, V4M2),
(V2M2, V4M2, V6M2), (V4M2, V6M2, V8M2), ...
We define the vlseg intrinsics with multiple outputs. There is no way to
describe codegen patterns with multiple outputs in the tablegen files, so
we do the codegen in RISCVISelDAGToDAG and use EXTRACT_SUBREG to extract
each output value.
The multiple scalable vector values are returned in a struct, so this
patch depends on the support for scalable vector structs.
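For example, the unmasked two-field form returns a two-element struct of
scalable vectors; this sketch mirrors the declarations in the added tests:

  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>}
           @llvm.riscv.vlseg2.nxv16i16(i16* %base, i32 %vl)
  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1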
Differential Revision: https://reviews.llvm.org/D94229
Added:
llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll
Modified:
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/lib/Target/RISCV/RISCVISelLowering.h
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
llvm/lib/Target/RISCV/RISCVRegisterInfo.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index bcf0169f7f31..2775e996d323 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -484,6 +484,24 @@ let TargetPrefix = "riscv" in {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
[NoCapture<ArgIndex<0>>]>, RISCVVIntrinsic;
+ // For unit stride segment load
+ // Input: (pointer, vl)
+ class RISCVUSSegLoad<int nf>
+ : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
+ !add(nf, -1))),
+ [LLVMPointerToElt<0>, llvm_anyint_ty],
+ [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+ // For unit stride segment load with mask
+ // Input: (maskedoff, pointer, mask, vl)
+ class RISCVUSSegLoadMask<int nf>
+ : Intrinsic<!listconcat([llvm_anyvector_ty], !listsplat(LLVMMatchType<0>,
+ !add(nf, -1))),
+ !listconcat(!listsplat(LLVMMatchType<0>, nf),
+ [LLVMPointerToElt<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_anyint_ty]),
+ [NoCapture<ArgIndex<nf>>, IntrReadMem]>, RISCVVIntrinsic;
+
multiclass RISCVUSLoad {
def "int_riscv_" # NAME : RISCVUSLoad;
def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
@@ -586,6 +604,10 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" # NAME : RISCVAMONoMask;
def "int_riscv_" # NAME # "_mask" : RISCVAMOMask;
}
+ multiclass RISCVUSSegLoad<int nf> {
+ def "int_riscv_" # NAME : RISCVUSSegLoad<nf>;
+ def "int_riscv_" # NAME # "_mask" : RISCVUSSegLoadMask<nf>;
+ }
defm vle : RISCVUSLoad;
defm vleff : RISCVUSLoad;
@@ -877,4 +899,8 @@ let TargetPrefix = "riscv" in {
llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic;
+ foreach nf = [2, 3, 4, 5, 6, 7, 8] in {
+ defm vlseg # nf : RISCVUSSegLoad<nf>;
+ }
+
} // TargetPrefix = "riscv"
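(For each nf in 2..8, the foreach above instantiates both flavors; e.g.
defm vlseg2 : RISCVUSSegLoad<2> produces int_riscv_vlseg2 and
int_riscv_vlseg2_mask, the intrinsic IDs matched during selection in
RISCVISelDAGToDAG.cpp below.)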
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 46e1335b66d6..9a3d700b22d1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -49,6 +49,161 @@ static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
return Result;
}
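+// Map a scalable vector type to an LMUL by its known minimum size in bytes.
+// For example, nxv16i16 is at least 256 bits = 32 bytes, so it maps to LMUL_4.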
+static RISCVVLMUL getLMUL(EVT VT) {
+ switch (VT.getSizeInBits().getKnownMinValue() / 8) {
+ default:
+ llvm_unreachable("Invalid LMUL.");
+ case 1:
+ return RISCVVLMUL::LMUL_F8;
+ case 2:
+ return RISCVVLMUL::LMUL_F4;
+ case 4:
+ return RISCVVLMUL::LMUL_F2;
+ case 8:
+ return RISCVVLMUL::LMUL_1;
+ case 16:
+ return RISCVVLMUL::LMUL_2;
+ case 32:
+ return RISCVVLMUL::LMUL_4;
+ case 64:
+ return RISCVVLMUL::LMUL_8;
+ }
+}
+
+static unsigned getSubregIndexByEVT(EVT VT, unsigned Index) {
+ RISCVVLMUL LMUL = getLMUL(VT);
+ if (LMUL == RISCVVLMUL::LMUL_F8 || LMUL == RISCVVLMUL::LMUL_F4 ||
+ LMUL == RISCVVLMUL::LMUL_F2 || LMUL == RISCVVLMUL::LMUL_1) {
+ static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
+ "Unexpected subreg numbering");
+ return RISCV::sub_vrm1_0 + Index;
+ } else if (LMUL == RISCVVLMUL::LMUL_2) {
+ static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
+ "Unexpected subreg numbering");
+ return RISCV::sub_vrm2_0 + Index;
+ } else if (LMUL == RISCVVLMUL::LMUL_4) {
+ static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
+ "Unexpected subreg numbering");
+ return RISCV::sub_vrm4_0 + Index;
+ }
+ llvm_unreachable("Invalid vector type.");
+}
+
+static SDValue createTupleImpl(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
+ unsigned RegClassID, unsigned SubReg0) {
+ assert(Regs.size() >= 2 && Regs.size() <= 8);
+
+ SDLoc DL(Regs[0]);
+ SmallVector<SDValue, 8> Ops;
+
+ Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));
+
+ for (unsigned I = 0; I < Regs.size(); ++I) {
+ Ops.push_back(Regs[I]);
+ Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
+ }
+ SDNode *N =
+ CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
+ return SDValue(N, 0);
+}
+
+static SDValue createM1Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
+ unsigned NF) {
+ static const unsigned RegClassIDs[] = {
+ RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
+ RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
+ RISCV::VRN8M1RegClassID};
+
+ return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm1_0);
+}
+
+static SDValue createM2Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
+ unsigned NF) {
+ static const unsigned RegClassIDs[] = {RISCV::VRN2M2RegClassID,
+ RISCV::VRN3M2RegClassID,
+ RISCV::VRN4M2RegClassID};
+
+ return createTupleImpl(CurDAG, Regs, RegClassIDs[NF - 2], RISCV::sub_vrm2_0);
+}
+
+static SDValue createM4Tuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
+ unsigned NF) {
+ return createTupleImpl(CurDAG, Regs, RISCV::VRN2M4RegClassID,
+ RISCV::sub_vrm4_0);
+}
+
+static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
+ unsigned NF, RISCVVLMUL LMUL) {
+ switch (LMUL) {
+ default:
+ llvm_unreachable("Invalid LMUL.");
+ case RISCVVLMUL::LMUL_F8:
+ case RISCVVLMUL::LMUL_F4:
+ case RISCVVLMUL::LMUL_F2:
+ case RISCVVLMUL::LMUL_1:
+ return createM1Tuple(CurDAG, Regs, NF);
+ case RISCVVLMUL::LMUL_2:
+ return createM2Tuple(CurDAG, Regs, NF);
+ case RISCVVLMUL::LMUL_4:
+ return createM4Tuple(CurDAG, Regs, NF);
+ }
+}
+
+void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned IntNo) {
+ SDLoc DL(Node);
+ unsigned NF = Node->getNumValues() - 1;
+ EVT VT = Node->getValueType(0);
+ unsigned ScalarSize = VT.getScalarSizeInBits();
+ MVT XLenVT = Subtarget->getXLenVT();
+ RISCVVLMUL LMUL = getLMUL(VT);
+ SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+ SDValue Operands[] = {Node->getOperand(2), // Base pointer.
+ Node->getOperand(3), // VL.
+ SEW, Node->getOperand(0)}; // Chain
+ const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+ IntNo, ScalarSize, static_cast<unsigned>(LMUL));
+ SDNode *Load =
+ CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
+ SDValue SuperReg = SDValue(Load, 0);
+ for (unsigned I = 0; I < NF; ++I)
+ ReplaceUses(SDValue(Node, I),
+ CurDAG->getTargetExtractSubreg(getSubregIndexByEVT(VT, I), DL,
+ VT, SuperReg));
+
+ ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
+ CurDAG->RemoveDeadNode(Node);
+}
+
+void RISCVDAGToDAGISel::selectVLSEGMask(SDNode *Node, unsigned IntNo) {
+ SDLoc DL(Node);
+ unsigned NF = Node->getNumValues() - 1;
+ EVT VT = Node->getValueType(0);
+ unsigned ScalarSize = VT.getScalarSizeInBits();
+ MVT XLenVT = Subtarget->getXLenVT();
+ RISCVVLMUL LMUL = getLMUL(VT);
+ SDValue SEW = CurDAG->getTargetConstant(ScalarSize, DL, XLenVT);
+ SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
+ SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
+ SDValue Operands[] = {MaskedOff,
+ Node->getOperand(NF + 2), // Base pointer.
+ Node->getOperand(NF + 3), // Mask.
+ Node->getOperand(NF + 4), // VL.
+ SEW,
+ Node->getOperand(0)}; // Chain.
+ const RISCVZvlssegTable::RISCVZvlsseg *P = RISCVZvlssegTable::getPseudo(
+ IntNo, ScalarSize, static_cast<unsigned>(LMUL));
+ SDNode *Load =
+ CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);
+ SDValue SuperReg = SDValue(Load, 0);
+ for (unsigned I = 0; I < NF; ++I)
+ ReplaceUses(SDValue(Node, I),
+ CurDAG->getTargetExtractSubreg(getSubregIndexByEVT(VT, I), DL,
+ VT, SuperReg));
+
+ ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
+ CurDAG->RemoveDeadNode(Node);
+}
+
void RISCVDAGToDAGISel::Select(SDNode *Node) {
// If we have a custom node, we have already selected.
if (Node->isMachineOpcode()) {
@@ -171,6 +326,26 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
/* Chain */ Node->getOperand(0)));
return;
}
+ case Intrinsic::riscv_vlseg2:
+ case Intrinsic::riscv_vlseg3:
+ case Intrinsic::riscv_vlseg4:
+ case Intrinsic::riscv_vlseg5:
+ case Intrinsic::riscv_vlseg6:
+ case Intrinsic::riscv_vlseg7:
+ case Intrinsic::riscv_vlseg8: {
+ selectVLSEG(Node, IntNo);
+ return;
+ }
+ case Intrinsic::riscv_vlseg2_mask:
+ case Intrinsic::riscv_vlseg3_mask:
+ case Intrinsic::riscv_vlseg4_mask:
+ case Intrinsic::riscv_vlseg5_mask:
+ case Intrinsic::riscv_vlseg6_mask:
+ case Intrinsic::riscv_vlseg7_mask:
+ case Intrinsic::riscv_vlseg8_mask: {
+ selectVLSEGMask(Node, IntNo);
+ return;
+ }
}
break;
}
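(Putting these pieces together for riscv_vlseg2 on nxv16i16: NF = 2,
SEW = 16, and getLMUL returns LMUL_4, so getPseudo resolves to
PseudoVLSEG2E16_V_M4; each result is then extracted from the Untyped
super-register with EXTRACT_SUBREG via sub_vrm4_0 and sub_vrm4_1. The
vlseg-rv32.ll tests below show the effect, e.g. the kill comment on
$v12m4_v16m4.)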
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 43efb6d26b96..67bafbe32ce2 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -55,6 +55,9 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);
+ void selectVLSEG(SDNode *Node, unsigned IntNo);
+ void selectVLSEGMask(SDNode *Node, unsigned IntNo);
+
// Include the pieces autogenerated from the target description.
#include "RISCVGenDAGISel.inc"
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 4359d24acafc..a84d3a4dd006 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4268,4 +4268,11 @@ namespace RISCVVIntrinsicsTable {
#include "RISCVGenSearchableTables.inc"
} // namespace RISCVVIntrinsicsTable
+
+namespace RISCVZvlssegTable {
+
+#define GET_RISCVZvlssegTable_IMPL
+#include "RISCVGenSearchableTables.inc"
+
+} // namespace RISCVZvlssegTable
} // namespace llvm
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index b5b6077d5ccb..58bb331ccd01 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -318,6 +318,22 @@ using namespace RISCV;
#include "RISCVGenSearchableTables.inc"
} // end namespace RISCVVIntrinsicsTable
+
+namespace RISCVZvlssegTable {
+
+struct RISCVZvlsseg {
+ unsigned int IntrinsicID;
+ unsigned int SEW;
+ unsigned int LMUL;
+ unsigned int Pseudo;
+};
+
+using namespace RISCV;
+
+#define GET_RISCVZvlssegTable_DECL
+#include "RISCVGenSearchableTables.inc"
+
+} // namespace RISCVZvlssegTable
}
#endif
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 4e08ab0d563c..4ad21e1a8e15 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -80,6 +80,13 @@ class MxSet<int eew> {
!eq(eew, 64) : [V_M1, V_M2, V_M4, V_M8]);
}
+class NFSet<LMULInfo m> {
+ list<int> L = !cond(!eq(m.value, V_M8.value): [],
+ !eq(m.value, V_M4.value): [2],
+ !eq(m.value, V_M2.value): [2, 3, 4],
+ true: [2, 3, 4, 5, 6, 7, 8]);
+}
+
class shift_amount<int num> {
int val = !if(!eq(num, 1), 0, !add(1, shift_amount<!srl(num, 1)>.val));
}
@@ -123,6 +130,13 @@ class ToFPR32<ValueType type, DAGOperand operand, string name> {
!dag(type, [operand], [name]));
}
+class SegRegClass<LMULInfo m, int nf> {
+ VReg RC = !cast<VReg>("VRN" # nf # !cond(!eq(m.value, V_MF8.value): V_M1.MX,
+ !eq(m.value, V_MF4.value): V_M1.MX,
+ !eq(m.value, V_MF2.value): V_M1.MX,
+ true: m.MX));
+}
+
//===----------------------------------------------------------------------===//
// Vector register and vector group type information.
//===----------------------------------------------------------------------===//
@@ -386,6 +400,20 @@ def RISCVVIntrinsicsTable : GenericTable {
let PrimaryKeyName = "getRISCVVIntrinsicInfo";
}
+class RISCVZvlsseg<string IntrName, bits<11> S, bits<3> L> {
+ Intrinsic IntrinsicID = !cast<Intrinsic>(IntrName);
+ bits<11> SEW = S;
+ bits<3> LMUL = L;
+ Pseudo Pseudo = !cast<Pseudo>(NAME);
+}
+
+def RISCVZvlssegTable : GenericTable {
+ let FilterClass = "RISCVZvlsseg";
+ let Fields = ["IntrinsicID", "SEW", "LMUL", "Pseudo"];
+ let PrimaryKey = ["IntrinsicID", "SEW", "LMUL"];
+ let PrimaryKeyName = "getPseudo";
+}
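+// The generated getPseudo(IntrinsicID, SEW, LMUL) lookup is what
+// RISCVISelDAGToDAG uses to map a segment load intrinsic to its pseudo.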
+
//===----------------------------------------------------------------------===//
// Helpers to define the different pseudo instructions.
//===----------------------------------------------------------------------===//
@@ -409,6 +437,23 @@ class PseudoToVInst<string PseudoInst> {
!subst("Pseudo", "", PseudoInst))))))))))))))));
}
+class ToLowerCase<string Upper> {
+ string L = !subst("VLSEG", "vlseg", Upper);
+}
+
+// Example: PseudoVLSEG2E32_V_M2 -> int_riscv_vlseg2
+// Example: PseudoVLSEG2E32_V_M2_MASK -> int_riscv_vlseg2_mask
+class PseudoToIntrinsic<string PseudoInst, bit IsMasked> {
+ string Intrinsic = !strconcat("int_riscv_",
+ ToLowerCase<
+ !subst("E8", "",
+ !subst("E16", "",
+ !subst("E32", "",
+ !subst("E64", "",
+ !subst("_V", "", PseudoToVInst<PseudoInst>.VInst)))))>.L,
+ !if(IsMasked, "_mask", ""));
+}
+
// The destination vector register group for a masked vector instruction cannot
// overlap the source mask register (v0), unless the destination vector register
// is being written with a mask value (e.g., comparisons) or the scalar result
@@ -929,6 +974,40 @@ multiclass VPseudoAMO {
defm "EI" # eew : VPseudoAMOEI<eew>;
}
+class VPseudoUSSegLoadNoMask<VReg RetClass, bits<11> EEW>:
+ Pseudo<(outs RetClass:$rd),
+ (ins GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
+ RISCVVPseudo,
+ RISCVZvlsseg<PseudoToIntrinsic<NAME, false>.Intrinsic, EEW, VLMul> {
+ let mayLoad = 1;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let usesCustomInserter = 1;
+ let Uses = [VL, VTYPE];
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let HasDummyMask = 1;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoUSSegLoadMask<VReg RetClass, bits<11> EEW>:
+ Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+ (ins GetVRegNoV0<RetClass>.R:$merge, GPR:$rs1,
+ VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+ RISCVVPseudo,
+ RISCVZvlsseg<PseudoToIntrinsic<NAME, true>.Intrinsic, EEW, VLMul> {
+ let mayLoad = 1;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let usesCustomInserter = 1;
+ let Constraints = "$rd = $merge";
+ let Uses = [VL, VTYPE];
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let HasMergeOp = 1;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
multiclass VPseudoUSLoad {
foreach lmul = MxList.m in {
defvar LInfo = lmul.MX;
@@ -1437,6 +1516,21 @@ multiclass VPseudoConversionV_W {
defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>;
}
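+// One pseudo per legal (nf, eew, lmul) combination; for example, nf = 2,
+// eew = 16, LMUL = 4 yields PseudoVLSEG2E16_V_M4 and
+// PseudoVLSEG2E16_V_M4_MASK, both using the VRN2M4 register class.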
+multiclass VPseudoUSSegLoad {
+ foreach eew = EEWList in {
+ foreach lmul = MxSet<eew>.m in {
+ defvar LInfo = lmul.MX;
+ let VLMul = lmul.value in {
+ foreach nf = NFSet<lmul>.L in {
+ defvar vreg = SegRegClass<lmul, nf>.RC;
+ def nf # "E" # eew # "_V_" # LInfo : VPseudoUSSegLoadNoMask<vreg, eew>;
+ def nf # "E" # eew # "_V_" # LInfo # "_MASK" : VPseudoUSSegLoadMask<vreg, eew>;
+ }
+ }
+ }
+ }
+}
+
//===----------------------------------------------------------------------===//
// Helpers to define the intrinsic patterns.
//===----------------------------------------------------------------------===//
@@ -2632,6 +2726,11 @@ foreach eew = EEWList in {
defm PseudoVLE # eew # FF : VPseudoUSLoad;
}
+//===----------------------------------------------------------------------===//
+// 7.8. Vector Load/Store Segment Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVLSEG : VPseudoUSSegLoad;
+
//===----------------------------------------------------------------------===//
// 8. Vector AMO Operations
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
index 45cc94591eb1..31dc47a15787 100644
--- a/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
+++ b/llvm/lib/Target/RISCV/RISCVMCInstLower.cpp
@@ -176,7 +176,7 @@ static bool lowerRISCVVMachineInstrToMCInst(const MachineInstr *MI,
if (RISCV::VRM2RegClass.contains(Reg) ||
RISCV::VRM4RegClass.contains(Reg) ||
RISCV::VRM8RegClass.contains(Reg)) {
- Reg = TRI->getSubReg(Reg, RISCV::sub_vrm2);
+ Reg = TRI->getSubReg(Reg, RISCV::sub_vrm1_0);
assert(Reg && "Subregister does not exist");
}
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
index 75615fd334b7..3b79a10f111b 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td
@@ -51,12 +51,21 @@ class RISCVRegWithSubRegs<bits<5> Enc, string n, list<Register> subregs,
def ABIRegAltName : RegAltNameIndex;
-def sub_vrm2 : SubRegIndex<64, -1>;
-def sub_vrm2_hi : SubRegIndex<64, -1>;
-def sub_vrm4 : SubRegIndex<128, -1>;
-def sub_vrm4_hi : SubRegIndex<128, -1>;
-def sub_vrm8 : SubRegIndex<256, -1>;
-def sub_vrm8_hi : SubRegIndex<256, -1>;
+def sub_vrm1_0 : SubRegIndex<64, -1>;
+def sub_vrm1_1 : SubRegIndex<64, -1>;
+def sub_vrm1_2 : SubRegIndex<64, -1>;
+def sub_vrm1_3 : SubRegIndex<64, -1>;
+def sub_vrm1_4 : SubRegIndex<64, -1>;
+def sub_vrm1_5 : SubRegIndex<64, -1>;
+def sub_vrm1_6 : SubRegIndex<64, -1>;
+def sub_vrm1_7 : SubRegIndex<64, -1>;
+def sub_vrm2_0 : SubRegIndex<128, -1>;
+def sub_vrm2_1 : SubRegIndex<128, -1>;
+def sub_vrm2_2 : SubRegIndex<128, -1>;
+def sub_vrm2_3 : SubRegIndex<128, -1>;
+def sub_vrm4_0 : SubRegIndex<256, -1>;
+def sub_vrm4_1 : SubRegIndex<256, -1>;
+
} // Namespace = "RISCV"
// Integer registers
@@ -340,6 +349,65 @@ defvar vbool16_t = nxv4i1;
defvar vbool32_t = nxv2i1;
defvar vbool64_t = nxv1i1;
+// There is no need to define register classes for fractional LMUL.
+def LMULList {
+ list<int> m = [1, 2, 4, 8];
+}
+
+//===----------------------------------------------------------------------===//
+// Utility classes for segment load/store.
+//===----------------------------------------------------------------------===//
+// The set of legal NF for LMUL = lmul.
+// LMUL == 1, NF = 2, 3, 4, 5, 6, 7, 8
+// LMUL == 2, NF = 2, 3, 4
+// LMUL == 4, NF = 2
+class NFList<int lmul> {
+ list<int> L = !cond(!eq(lmul, 1): [2, 3, 4, 5, 6, 7, 8],
+ !eq(lmul, 2): [2, 3, 4],
+ !eq(lmul, 4): [2],
+ !eq(lmul, 8): []);
+}
+
+// Generate [start, end) SubRegIndex list.
+class SubRegSet<list<SubRegIndex> LIn, int start, int nf, int lmul> {
+ list<SubRegIndex> L = !foldl([]<SubRegIndex>,
+ [0, 1, 2, 3, 4, 5, 6, 7],
+ AccList, i,
+ !listconcat(AccList,
+ !if(!lt(i, nf),
+ [!cast<SubRegIndex>("sub_vrm" # lmul # "_" # i)],
+ [])));
+}
+
+class IndexSet<int index, int nf, int lmul> {
+ list<int> R =
+ !foldl([]<int>,
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
+ 23, 24, 25, 26, 27, 28, 29, 30, 31],
+ L, i,
+ !listconcat(L,
+ !if(!and(
+ !le(!mul(index, lmul), !mul(i, lmul)),
+ !le(!mul(i, lmul),
+ !sub(!add(32, !mul(index, lmul)), !mul(nf, lmul)))
+ ), [!mul(i, lmul)], [])));
+}
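+// For example, IndexSet<0, 3, 2>.R = [0, 2, ..., 26]: with NF = 3 and
+// LMUL = 2, a tuple's first register may be V0M2 through V26M2, giving the
+// tuples (V0M2, V2M2, V4M2), (V2M2, V4M2, V6M2), ..., (V26M2, V28M2, V30M2).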
+
+class VRegList<list<dag> LIn, int start, int nf, int lmul> {
+ list<dag> L =
+ !if(!ge(start, nf),
+ LIn,
+ !listconcat(
+ [!dag(add,
+ !foreach(i, IndexSet<start, nf, lmul>.R,
+ !cast<Register>("V" # i # !cond(!eq(lmul, 2): "M2",
+ !eq(lmul, 4): "M4",
+ true: ""))),
+ !listsplat("", !size(IndexSet<start, nf, lmul>.R)))],
+ VRegList<LIn, !add(start, 1), nf, lmul>.L));
+}
+
// Vector registers
let RegAltNameIndices = [ABIRegAltName] in {
foreach Index = 0-31 in {
@@ -353,7 +421,7 @@ let RegAltNameIndices = [ABIRegAltName] in {
!cast<Register>("V"#!add(Index, 1))],
["v"#Index]>,
DwarfRegAlias<!cast<Register>("V"#Index)> {
- let SubRegIndices = [sub_vrm2, sub_vrm2_hi];
+ let SubRegIndices = [sub_vrm1_0, sub_vrm1_1];
}
}
@@ -363,7 +431,7 @@ let RegAltNameIndices = [ABIRegAltName] in {
!cast<Register>("V"#!add(Index, 2)#"M2")],
["v"#Index]>,
DwarfRegAlias<!cast<Register>("V"#Index)> {
- let SubRegIndices = [sub_vrm4, sub_vrm4_hi];
+ let SubRegIndices = [sub_vrm2_0, sub_vrm2_1];
}
}
@@ -373,7 +441,7 @@ let RegAltNameIndices = [ABIRegAltName] in {
!cast<Register>("V"#!add(Index, 4)#"M4")],
["v"#Index]>,
DwarfRegAlias<!cast<Register>("V"#Index)> {
- let SubRegIndices = [sub_vrm8, sub_vrm8_hi];
+ let SubRegIndices = [sub_vrm4_0, sub_vrm4_1];
}
}
@@ -383,6 +451,13 @@ let RegAltNameIndices = [ABIRegAltName] in {
def VXRM : RISCVReg<0, "vxrm", ["vxrm"]>;
}
+foreach m = [1, 2, 4] in {
+ foreach n = NFList<m>.L in {
+ def "VN" # n # "M" # m: RegisterTuples<SubRegSet<[], 0, n, m>.L,
+ VRegList<[], 0, n, m>.L>;
+ }
+}
+
class VReg<list<ValueType> regTypes, dag regList, int Vlmul>
: RegisterClass<"RISCV",
regTypes,
@@ -446,3 +521,11 @@ defvar VMaskVTs = [vbool64_t, vbool32_t, vbool16_t, vbool8_t,
def VMV0 : RegisterClass<"RISCV", VMaskVTs, 64, (add V0)> {
let Size = 64;
}
+
+foreach m = LMULList.m in {
+ foreach nf = NFList<m>.L in {
+ def "VRN" # nf # "M" # m : VReg<[untyped],
+ (add !cast<RegisterTuples>("VN" # nf # "M" # m)),
+ !mul(nf, m)>;
+ }
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll
new file mode 100644
index 000000000000..4e80049e4c64
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll
@@ -0,0 +1,4722 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(i16* , i32)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i16> @test_vlseg2_nxv16i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v12, (a0)
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+ ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlseg2_mask_nxv16i16(i16* %base, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v12, (a0)
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vlseg2e16.v v12, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+ %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.mask.nxv16i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+ ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(i8* , i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlseg2_nxv1i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg2_mask_nxv1i8(i8* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(i8* , i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlseg3_nxv1i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg3_mask_nxv1i8(i8* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(i8* , i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlseg4_nxv1i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg4_mask_nxv1i8(i8* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(i8* , i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlseg5_nxv1i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg5_mask_nxv1i8(i8* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(i8* , i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlseg6_nxv1i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg6_mask_nxv1i8(i8* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(i8* , i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlseg7_nxv1i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg7_mask_nxv1i8(i8* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(i8* , i32)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @test_vlseg8_nxv1i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg8_mask_nxv1i8(i8* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(i8* , i32)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @test_vlseg2_nxv16i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vlseg2e8.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+ ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlseg2_mask_nxv16i8(i8* %base, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT: vlseg2e8.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT: vlseg2e8.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+ %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+ ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(i8* , i32)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @test_vlseg3_nxv16i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vlseg3e8.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+ ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlseg3_mask_nxv16i8(i8* %base, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT: vlseg3e8.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT: vlseg3e8.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+ %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+ ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(i8* , i32)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @test_vlseg4_nxv16i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vlseg4e8.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+ ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlseg4_mask_nxv16i8(i8* %base, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT: vlseg4e8.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT: vlseg4e8.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+ %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+ ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(i32* , i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlseg2_nxv2i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg2_mask_nxv2i32(i32* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(i32* , i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlseg3_nxv2i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg3_mask_nxv2i32(i32* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(i32* , i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlseg4_nxv2i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg4_mask_nxv2i32(i32* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(i32* , i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlseg5_nxv2i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg5_mask_nxv2i32(i32* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(i32* , i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlseg6_nxv2i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg6_mask_nxv2i32(i32* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(i32* , i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlseg7_nxv2i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg7_mask_nxv2i32(i32* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(i32* , i32)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @test_vlseg8_nxv2i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg8_mask_nxv2i32(i32* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
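+; The same tests repeat for nxv4i16: SEW=16 at LMUL=1 again allocates tuples
+; of consecutive single registers starting at v15.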
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(i16* , i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlseg2_nxv4i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg2_mask_nxv4i16(i16* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(i16* , i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlseg3_nxv4i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg3_mask_nxv4i16(i16* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(i16* , i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlseg4_nxv4i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg4_mask_nxv4i16(i16* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(i16* , i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlseg5_nxv4i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg5_mask_nxv4i16(i16* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(i16* , i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlseg6_nxv4i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg6_mask_nxv4i16(i16* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(i16* , i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlseg7_nxv4i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg7_mask_nxv4i16(i16* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(i16* , i32)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @test_vlseg8_nxv4i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg8_mask_nxv4i16(i16* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
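+; nxv1i32 occupies only half a vector register, so these tests select the
+; fractional LMUL mf2 in vsetvli while still using single-register tuples.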
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(i32* , i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlseg2_nxv1i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg2_mask_nxv1i32(i32* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(i32* , i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlseg3_nxv1i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg3_mask_nxv1i32(i32* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(i32* , i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlseg4_nxv1i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg4_mask_nxv1i32(i32* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(i32* , i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlseg5_nxv1i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg5_mask_nxv1i32(i32* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(i32* , i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlseg6_nxv1i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg6_mask_nxv1i32(i32* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(i32* , i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlseg7_nxv1i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg7_mask_nxv1i32(i32* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(i32* , i32)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @test_vlseg8_nxv1i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg8_mask_nxv1i32(i32* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
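+; nxv8i16 requires LMUL=2, so the tuple is built from register pairs
+; (v14m2, v16m2, ...) and the merge copies use vmv2r.v instead of vmv1r.v.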
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(i16* , i32)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @test_vlseg2_nxv8i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+ ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlseg2_mask_nxv8i16(i16* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vlseg2e16.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+ %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+ ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(i16* , i32)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @test_vlseg3_nxv8i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+ ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlseg3_mask_nxv8i16(i16* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vlseg3e16.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+ %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+ ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(i16* , i32)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @test_vlseg4_nxv8i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+ ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlseg4_mask_nxv8i16(i16* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vlseg4e16.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+ %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+ ret <vscale x 8 x i16> %3
+}
+
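+; nxv8i8 fits in a single register again (LMUL=1), so the m1 pattern resumes.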
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(i8* , i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlseg2_nxv8i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg2_mask_nxv8i8(i8* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(i8* , i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlseg3_nxv8i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg3_mask_nxv8i8(i8* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(i8* , i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlseg4_nxv8i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg4_mask_nxv8i8(i8* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(i8* , i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlseg5_nxv8i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg5_mask_nxv8i8(i8* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(i8* , i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlseg6_nxv8i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg6_mask_nxv8i8(i8* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(i8* , i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlseg7_nxv8i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg7_mask_nxv8i8(i8* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(i8*, i32)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @test_vlseg8_nxv8i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg8_mask_nxv8i8(i8* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(i32*, i32)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @test_vlseg2_nxv8i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vlseg2e32.v v12, (a0)
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+ ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @test_vlseg2_mask_nxv8i32(i32* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu
+; CHECK-NEXT: vlseg2e32.v v12, (a0)
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vlseg2e32.v v12, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
+ %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.mask.nxv8i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
+ ret <vscale x 8 x i32> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(i8*, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlseg2_nxv4i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg2_mask_nxv4i8(i8* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(i8*, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlseg3_nxv4i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg3_mask_nxv4i8(i8* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(i8*, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlseg4_nxv4i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg4_mask_nxv4i8(i8* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(i8*, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlseg5_nxv4i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg5_mask_nxv4i8(i8* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(i8*, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlseg6_nxv4i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg6_mask_nxv4i8(i8* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(i8*, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlseg7_nxv4i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg7_mask_nxv4i8(i8* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(i8*, i32)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @test_vlseg8_nxv4i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg8_mask_nxv4i8(i8* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(i16*, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlseg2_nxv1i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg2_mask_nxv1i16(i16* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(i16*, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlseg3_nxv1i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg3_mask_nxv1i16(i16* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(i16*, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlseg4_nxv1i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg4_mask_nxv1i16(i16* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(i16*, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlseg5_nxv1i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg5_mask_nxv1i16(i16* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(i16*, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlseg6_nxv1i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg6_mask_nxv1i16(i16* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(i16*, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlseg7_nxv1i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg7_mask_nxv1i16(i16* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(i16*, i32)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @test_vlseg8_nxv1i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg8_mask_nxv1i16(i16* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(i8*, i32)
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @test_vlseg2_nxv32i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vlseg2e8.v v12, (a0)
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
+ ret <vscale x 32 x i8> %1
+}
+
+define <vscale x 32 x i8> @test_vlseg2_mask_nxv32i8(i8* %base, i32 %vl, <vscale x 32 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu
+; CHECK-NEXT: vlseg2e8.v v12, (a0)
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu
+; CHECK-NEXT: vlseg2e8.v v12, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
+ %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.mask.nxv32i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 32 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
+ ret <vscale x 32 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(i8*, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlseg2_nxv2i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg2_mask_nxv2i8(i8* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(i8*, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlseg3_nxv2i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg3_mask_nxv2i8(i8* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(i8*, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlseg4_nxv2i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg4_mask_nxv2i8(i8* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(i8*, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlseg5_nxv2i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg5_mask_nxv2i8(i8* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(i8*, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlseg6_nxv2i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg6_mask_nxv2i8(i8* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(i8*, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlseg7_nxv2i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg7_mask_nxv2i8(i8* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(i8*, i32)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @test_vlseg8_nxv2i8(i8* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg8_mask_nxv2i8(i8* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(i16*, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlseg2_nxv2i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg2_mask_nxv2i16(i16* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(i16*, i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlseg3_nxv2i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg3_mask_nxv2i16(i16* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(i16* , i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlseg4_nxv2i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg4_mask_nxv2i16(i16* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(i16* , i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlseg5_nxv2i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg5_mask_nxv2i16(i16* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(i16* , i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlseg6_nxv2i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg6_mask_nxv2i16(i16* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(i16* , i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlseg7_nxv2i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg7_mask_nxv2i16(i16* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(i16* , i32)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @test_vlseg8_nxv2i16(i16* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg8_mask_nxv2i16(i16* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(i32* , i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @test_vlseg2_nxv4i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+ ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlseg2_mask_nxv4i32(i32* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+ %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+ ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(i32* , i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @test_vlseg3_nxv4i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+ ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlseg3_mask_nxv4i32(i32* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+ %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+ ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(i32* , i32)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @test_vlseg4_nxv4i32(i32* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+ ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlseg4_mask_nxv4i32(i32* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+ %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+ ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(half* , i32)
+declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x half> @test_vlseg2_nxv16f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v12, (a0)
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
+ ret <vscale x 16 x half> %1
+}
+
+define <vscale x 16 x half> @test_vlseg2_mask_nxv16f16(half* %base, i32 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v12, (a0)
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vlseg2e16.v v12, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
+ %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.mask.nxv16f16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 16 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
+ ret <vscale x 16 x half> %3
+}
+
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(double* , i32)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x double> @test_vlseg2_nxv4f64(double* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vlseg2e64.v v12, (a0)
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+ ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> @test_vlseg2_mask_nxv4f64(double* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu
+; CHECK-NEXT: vlseg2e64.v v12, (a0)
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vlseg2e64.v v12, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
+ %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.mask.nxv4f64(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
+ ret <vscale x 4 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(double* , i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlseg2_nxv1f64(double* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg2e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg2_mask_nxv1f64(double* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg2e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg2e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(double* , i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlseg3_nxv1f64(double* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg3e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg3_mask_nxv1f64(double* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg3e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg3e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(double* , i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlseg4_nxv1f64(double* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg4e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg4_mask_nxv1f64(double* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg4e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg4e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(double* , i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlseg5_nxv1f64(double* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg5e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg5_mask_nxv1f64(double* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg5e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg5e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(double* , i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlseg6_nxv1f64(double* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg6e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg6_mask_nxv1f64(double* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg6e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg6e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(double* , i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlseg7_nxv1f64(double* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg7e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg7_mask_nxv1f64(double* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg7e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg7e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(double* , i32)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x double> @test_vlseg8_nxv1f64(double* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg8e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg8_mask_nxv1f64(double* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg8e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg8e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(float* , i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlseg2_nxv2f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg2_mask_nxv2f32(float* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(float* , i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlseg3_nxv2f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg3_mask_nxv2f32(float* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(float* , i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlseg4_nxv2f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg4_mask_nxv2f32(float* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(float* , i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlseg5_nxv2f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg5_mask_nxv2f32(float* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(float* , i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlseg6_nxv2f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg6_mask_nxv2f32(float* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(float* , i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlseg7_nxv2f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg7_mask_nxv2f32(float* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(float* , i32)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x float> @test_vlseg8_nxv2f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg8_mask_nxv2f32(float* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(half* , i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlseg2_nxv1f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg2_mask_nxv1f16(half* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(half* , i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlseg3_nxv1f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg3_mask_nxv1f16(half* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(half* , i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlseg4_nxv1f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg4_mask_nxv1f16(half* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(half* , i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlseg5_nxv1f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg5_mask_nxv1f16(half* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(half* , i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlseg6_nxv1f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg6_mask_nxv1f16(half* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(half* , i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlseg7_nxv1f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg7_mask_nxv1f16(half* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(half* , i32)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x half> @test_vlseg8_nxv1f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg8_mask_nxv1f16(half* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(float* , i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlseg2_nxv1f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg2_mask_nxv1f32(float* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(float* , i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlseg3_nxv1f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg3_mask_nxv1f32(float* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(float* , i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlseg4_nxv1f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg4_mask_nxv1f32(float* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(float* , i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlseg5_nxv1f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg5_mask_nxv1f32(float* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(float* , i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlseg6_nxv1f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg6_mask_nxv1f32(float* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(float* , i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlseg7_nxv1f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg7_mask_nxv1f32(float* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(float* , i32)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x float> @test_vlseg8_nxv1f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg8_mask_nxv1f32(float* %base, i32 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(half* , i32)
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x half> @test_vlseg2_nxv8f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+ ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlseg2_mask_nxv8f16(half* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vlseg2e16.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+ %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+ ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(half* , i32)
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x half> @test_vlseg3_nxv8f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+ ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlseg3_mask_nxv8f16(half* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vlseg3e16.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+ %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+ ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(half* , i32)
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x half> @test_vlseg4_nxv8f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+ ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlseg4_mask_nxv8f16(half* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vlseg4e16.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+ %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+ ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(float* , i32)
+declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x float> @test_vlseg2_nxv8f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vlseg2e32.v v12, (a0)
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
+ ret <vscale x 8 x float> %1
+}
+
+define <vscale x 8 x float> @test_vlseg2_mask_nxv8f32(float* %base, i32 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu
+; CHECK-NEXT: vlseg2e32.v v12, (a0)
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vlseg2e32.v v12, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
+ %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.mask.nxv8f32(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 8 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
+ ret <vscale x 8 x float> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(double* , i32)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @test_vlseg2_nxv2f64(double* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg2e64.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+ ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlseg2_mask_nxv2f64(double* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg2e64.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vlseg2e64.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+ %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+ ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(double* , i32)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @test_vlseg3_nxv2f64(double* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg3e64.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+ ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlseg3_mask_nxv2f64(double* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg3e64.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vlseg3e64.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+ %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+ ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(double* , i32)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x double> @test_vlseg4_nxv2f64(double* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg4e64.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+ ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlseg4_mask_nxv2f64(double* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg4e64.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vlseg4e64.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(double* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+ %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+ ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(half* , i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlseg2_nxv4f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg2_mask_nxv4f16(half* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(half* , i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlseg3_nxv4f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg3_mask_nxv4f16(half* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(half* , i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlseg4_nxv4f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg4_mask_nxv4f16(half* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(half* , i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlseg5_nxv4f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg5_mask_nxv4f16(half* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(half* , i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlseg6_nxv4f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg6_mask_nxv4f16(half* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(half* , i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlseg7_nxv4f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg7_mask_nxv4f16(half* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(half* , i32)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x half> @test_vlseg8_nxv4f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg8_mask_nxv4f16(half* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(half* , i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlseg2_nxv2f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg2_mask_nxv2f16(half* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(half* , i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlseg3_nxv2f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg3_mask_nxv2f16(half* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(half* , i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlseg4_nxv2f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg4_mask_nxv2f16(half* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(half* , i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlseg5_nxv2f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg5_mask_nxv2f16(half* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(half* , i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlseg6_nxv2f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg6_mask_nxv2f16(half* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(half* , i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlseg7_nxv2f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg7_mask_nxv2f16(half* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(half* , i32)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x half> @test_vlseg8_nxv2f16(half* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg8_mask_nxv2f16(half* %base, i32 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(half* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(float* , i32)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x float> @test_vlseg2_nxv4f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+ ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlseg2_mask_nxv4f32(float* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+ %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+ ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(float* , i32)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x float> @test_vlseg3_nxv4f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+ ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlseg3_mask_nxv4f32(float* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+ %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+ ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(float* , i32)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x float> @test_vlseg4_nxv4f32(float* %base, i32 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+ ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlseg4_mask_nxv4f32(float* %base, i32 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(float* %base, i32 %vl)
+ %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+ %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i1> %mask, i32 %vl)
+ %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+ ret <vscale x 4 x float> %3
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll
new file mode 100644
index 000000000000..d855d055667b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll
@@ -0,0 +1,5120 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zvlsseg,+experimental-zfh \
+; RUN: -verify-machineinstrs < %s | FileCheck %s
+
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(i16* , i64)
+declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, i16*, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i16> @test_vlseg2_nxv16i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v12, (a0)
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
+ ret <vscale x 16 x i16> %1
+}
+
+define <vscale x 16 x i16> @test_vlseg2_mask_nxv16i16(i16* %base, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v12, (a0)
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vlseg2e16.v v12, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
+ %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.mask.nxv16i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, i16* %base, <vscale x 16 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
+ ret <vscale x 16 x i16> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(i32* , i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i32> @test_vlseg2_nxv4i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+ ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlseg2_mask_nxv4i32(i32* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+ %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+ ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(i32* , i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i32> @test_vlseg3_nxv4i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+ ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlseg3_mask_nxv4i32(i32* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+ %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+ ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(i32* , i64)
+declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i32> @test_vlseg4_nxv4i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
+ ret <vscale x 4 x i32> %1
+}
+
+define <vscale x 4 x i32> @test_vlseg4_mask_nxv4i32(i32* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
+ %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, i32* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
+ ret <vscale x 4 x i32> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(i8* , i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlseg2_nxv16i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vlseg2e8.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+ ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlseg2_mask_nxv16i8(i8* %base, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT: vlseg2e8.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT: vlseg2e8.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+ %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+ ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(i8* , i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlseg3_nxv16i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vlseg3e8.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+ ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlseg3_mask_nxv16i8(i8* %base, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT: vlseg3e8.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT: vlseg3e8.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+ %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+ ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(i8* , i64)
+declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i8*, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x i8> @test_vlseg4_nxv16i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vlseg4e8.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
+ ret <vscale x 16 x i8> %1
+}
+
+define <vscale x 16 x i8> @test_vlseg4_mask_nxv16i8(i8* %base, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m2,ta,mu
+; CHECK-NEXT: vlseg4e8.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT: vlseg4e8.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
+ %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, i8* %base, <vscale x 16 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
+ ret <vscale x 16 x i8> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg2.nxv1i64(i64* , i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg2.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlseg2_nxv1i64(i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg2e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg2.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+ ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlseg2_mask_nxv1i64(i64* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg2e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg2e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg2.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+ %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg2.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+ ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg3.nxv1i64(i64* , i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg3.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlseg3_nxv1i64(i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg3e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg3.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+ ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlseg3_mask_nxv1i64(i64* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg3e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg3e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg3.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+ %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg3.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+ ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg4.nxv1i64(i64* , i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg4.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlseg4_nxv1i64(i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg4e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg4.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+ ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlseg4_mask_nxv1i64(i64* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg4e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg4e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg4.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+ %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg4.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+ ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg5.nxv1i64(i64* , i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg5.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlseg5_nxv1i64(i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg5e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg5.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+ ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlseg5_mask_nxv1i64(i64* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg5e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg5e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg5.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+ %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg5.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+ ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg6.nxv1i64(i64* , i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg6.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlseg6_nxv1i64(i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg6e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg6.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+ ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlseg6_mask_nxv1i64(i64* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg6e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg6e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg6.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+ %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg6.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+ ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg7.nxv1i64(i64*, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg7.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlseg7_nxv1i64(i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg7e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg7.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+ ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlseg7_mask_nxv1i64(i64* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg7e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg7e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg7.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+ %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg7.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+ ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg8.nxv1i64(i64*, i64)
+declare {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg8.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i64> @test_vlseg8_nxv1i64(i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg8e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg8.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 1
+ ret <vscale x 1 x i64> %1
+}
+
+define <vscale x 1 x i64> @test_vlseg8_mask_nxv1i64(i64* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg8e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg8e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg8.nxv1i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %0, 0
+ %2 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg8.mask.nxv1i64(<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1,<vscale x 1 x i64> %1, i64* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} %2, 1
+ ret <vscale x 1 x i64> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(i32*, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlseg2_nxv1i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg2_mask_nxv1i32(i32* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(i32*, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlseg3_nxv1i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg3_mask_nxv1i32(i32* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(i32*, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlseg4_nxv1i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg4_mask_nxv1i32(i32* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(i32*, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlseg5_nxv1i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg5_mask_nxv1i32(i32* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(i32*, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlseg6_nxv1i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg6_mask_nxv1i32(i32* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(i32*, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlseg7_nxv1i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg7_mask_nxv1i32(i32* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(i32*, i64)
+declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i32> @test_vlseg8_nxv1i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
+ ret <vscale x 1 x i32> %1
+}
+
+define <vscale x 1 x i32> @test_vlseg8_mask_nxv1i32(i32* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
+ %2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, i32* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
+ ret <vscale x 1 x i32> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(i16*, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i16> @test_vlseg2_nxv8i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+ ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlseg2_mask_nxv8i16(i16* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vlseg2e16.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+ %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+ ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(i16*, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i16> @test_vlseg3_nxv8i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+ ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlseg3_mask_nxv8i16(i16* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vlseg3e16.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+ %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+ ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(i16*, i64)
+declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i16*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i16> @test_vlseg4_nxv8i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
+ ret <vscale x 8 x i16> %1
+}
+
+define <vscale x 8 x i16> @test_vlseg4_mask_nxv8i16(i16* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vlseg4e16.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
+ %2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, i16* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
+ ret <vscale x 8 x i16> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(i8*, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlseg2_nxv4i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg2_mask_nxv4i8(i8* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(i8*, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlseg3_nxv4i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg3_mask_nxv4i8(i8* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(i8*, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlseg4_nxv4i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg4_mask_nxv4i8(i8* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(i8*, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlseg5_nxv4i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg5_mask_nxv4i8(i8* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(i8*, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlseg6_nxv4i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg6_mask_nxv4i8(i8* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(i8*, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlseg7_nxv4i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg7_mask_nxv4i8(i8* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(i8*, i64)
+declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i8*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i8> @test_vlseg8_nxv4i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
+ ret <vscale x 4 x i8> %1
+}
+
+define <vscale x 4 x i8> @test_vlseg8_mask_nxv4i8(i8* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
+ %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, i8* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
+ ret <vscale x 4 x i8> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(i16*, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlseg2_nxv1i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg2_mask_nxv1i16(i16* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(i16*, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlseg3_nxv1i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg3_mask_nxv1i16(i16* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(i16*, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlseg4_nxv1i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg4_mask_nxv1i16(i16* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(i16*, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlseg5_nxv1i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg5_mask_nxv1i16(i16* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(i16*, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlseg6_nxv1i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg6_mask_nxv1i16(i16* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(i16*, i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlseg7_nxv1i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg7_mask_nxv1i16(i16* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(i16* , i64)
+declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i16*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i16> @test_vlseg8_nxv1i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
+ ret <vscale x 1 x i16> %1
+}
+
+define <vscale x 1 x i16> @test_vlseg8_mask_nxv1i16(i16* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
+ %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, i16* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
+ ret <vscale x 1 x i16> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(i32* , i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlseg2_nxv2i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg2_mask_nxv2i32(i32* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(i32* , i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlseg3_nxv2i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg3_mask_nxv2i32(i32* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(i32* , i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlseg4_nxv2i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg4_mask_nxv2i32(i32* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(i32* , i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlseg5_nxv2i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg5_mask_nxv2i32(i32* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(i32* , i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlseg6_nxv2i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg6_mask_nxv2i32(i32* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(i32* , i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlseg7_nxv2i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg7_mask_nxv2i32(i32* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(i32* , i64)
+declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i32> @test_vlseg8_nxv2i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
+ ret <vscale x 2 x i32> %1
+}
+
+define <vscale x 2 x i32> @test_vlseg8_mask_nxv2i32(i32* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
+ %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, i32* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
+ ret <vscale x 2 x i32> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(i8* , i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlseg2_nxv8i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg2_mask_nxv8i8(i8* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(i8* , i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlseg3_nxv8i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg3_mask_nxv8i8(i8* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(i8* , i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlseg4_nxv8i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg4_mask_nxv8i8(i8* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(i8* , i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlseg5_nxv8i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg5_mask_nxv8i8(i8* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(i8* , i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlseg6_nxv8i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg6_mask_nxv8i8(i8* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(i8* , i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlseg7_nxv8i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg7_mask_nxv8i8(i8* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(i8* , i64)
+declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i8*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i8> @test_vlseg8_nxv8i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
+ ret <vscale x 8 x i8> %1
+}
+
+define <vscale x 8 x i8> @test_vlseg8_mask_nxv8i8(i8* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m1,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
+ %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, i8* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
+ ret <vscale x 8 x i8> %3
+}
+
+declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlseg2.nxv4i64(i64* , i64)
+declare {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlseg2.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i64>, i64*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i64> @test_vlseg2_nxv4i64(i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vlseg2e64.v v12, (a0)
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlseg2.nxv4i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 1
+ ret <vscale x 4 x i64> %1
+}
+
+define <vscale x 4 x i64> @test_vlseg2_mask_nxv4i64(i64* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu
+; CHECK-NEXT: vlseg2e64.v v12, (a0)
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vlseg2e64.v v12, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlseg2.nxv4i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %0, 0
+ %2 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlseg2.mask.nxv4i64(<vscale x 4 x i64> %1,<vscale x 4 x i64> %1, i64* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i64>,<vscale x 4 x i64>} %2, 1
+ ret <vscale x 4 x i64> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(i16* , i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlseg2_nxv4i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg2_mask_nxv4i16(i16* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(i16* , i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlseg3_nxv4i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg3_mask_nxv4i16(i16* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(i16* , i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlseg4_nxv4i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg4_mask_nxv4i16(i16* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(i16* , i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlseg5_nxv4i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg5_mask_nxv4i16(i16* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(i16* , i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlseg6_nxv4i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg6_mask_nxv4i16(i16* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(i16* , i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlseg7_nxv4i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg7_mask_nxv4i16(i16* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(i16* , i64)
+declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i16*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x i16> @test_vlseg8_nxv4i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
+ ret <vscale x 4 x i16> %1
+}
+
+define <vscale x 4 x i16> @test_vlseg8_mask_nxv4i16(i16* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
+ %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, i16* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
+ ret <vscale x 4 x i16> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(i8* , i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlseg2_nxv1i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg2_mask_nxv1i8(i8* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(i8* , i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlseg3_nxv1i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg3_mask_nxv1i8(i8* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(i8* , i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlseg4_nxv1i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg4_mask_nxv1i8(i8* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(i8* , i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlseg5_nxv1i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg5_mask_nxv1i8(i8* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(i8* , i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlseg6_nxv1i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg6_mask_nxv1i8(i8* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(i8* , i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlseg7_nxv1i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg7_mask_nxv1i8(i8* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(i8* , i64)
+declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i8*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x i8> @test_vlseg8_nxv1i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
+ ret <vscale x 1 x i8> %1
+}
+
+define <vscale x 1 x i8> @test_vlseg8_mask_nxv1i8(i8* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
+ %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, i8* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
+ ret <vscale x 1 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(i8* , i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlseg2_nxv2i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg2_mask_nxv2i8(i8* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg2e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(i8* , i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlseg3_nxv2i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg3_mask_nxv2i8(i8* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg3e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(i8* , i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlseg4_nxv2i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg4_mask_nxv2i8(i8* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg4e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(i8* , i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlseg5_nxv2i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg5_mask_nxv2i8(i8* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg5e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(i8* , i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlseg6_nxv2i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg6_mask_nxv2i8(i8* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg6e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(i8* , i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlseg7_nxv2i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg7_mask_nxv2i8(i8* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg7e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(i8* , i64)
+declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i8*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i8> @test_vlseg8_nxv2i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
+ ret <vscale x 2 x i8> %1
+}
+
+define <vscale x 2 x i8> @test_vlseg8_mask_nxv2i8(i8* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vlseg8e8.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
+ %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, i8* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
+ ret <vscale x 2 x i8> %3
+}
+
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(i32* , i64)
+declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, i32*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x i32> @test_vlseg2_nxv8i32(i32* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vlseg2e32.v v12, (a0)
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
+ ret <vscale x 8 x i32> %1
+}
+
+define <vscale x 8 x i32> @test_vlseg2_mask_nxv8i32(i32* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu
+; CHECK-NEXT: vlseg2e32.v v12, (a0)
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vlseg2e32.v v12, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
+ %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.mask.nxv8i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, i32* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
+ ret <vscale x 8 x i32> %3
+}
+
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(i8* , i64)
+declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, i8*, <vscale x 32 x i1>, i64)
+
+define <vscale x 32 x i8> @test_vlseg2_nxv32i8(i8* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vlseg2e8.v v12, (a0)
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
+ ret <vscale x 32 x i8> %1
+}
+
+define <vscale x 32 x i8> @test_vlseg2_mask_nxv32i8(i8* %base, i64 %vl, <vscale x 32 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e8,m4,ta,mu
+; CHECK-NEXT: vlseg2e8.v v12, (a0)
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu
+; CHECK-NEXT: vlseg2e8.v v12, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
+ %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.mask.nxv32i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, i8* %base, <vscale x 32 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
+ ret <vscale x 32 x i8> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(i16* , i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlseg2_nxv2i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg2_mask_nxv2i16(i16* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(i16* , i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlseg3_nxv2i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg3_mask_nxv2i16(i16* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(i16* , i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlseg4_nxv2i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg4_mask_nxv2i16(i16* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(i16* , i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlseg5_nxv2i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg5_mask_nxv2i16(i16* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(i16* , i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlseg6_nxv2i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg6_mask_nxv2i16(i16* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(i16* , i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlseg7_nxv2i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg7_mask_nxv2i16(i16* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(i16* , i64)
+declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i16*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i16> @test_vlseg8_nxv2i16(i16* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
+ ret <vscale x 2 x i16> %1
+}
+
+define <vscale x 2 x i16> @test_vlseg8_mask_nxv2i16(i16* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
+ %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, i16* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
+ ret <vscale x 2 x i16> %3
+}
+
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg2.nxv2i64(i64* , i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg2.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i64> @test_vlseg2_nxv2i64(i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg2e64.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg2.nxv2i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
+ ret <vscale x 2 x i64> %1
+}
+
+define <vscale x 2 x i64> @test_vlseg2_mask_nxv2i64(i64* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg2e64.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vlseg2e64.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg2.nxv2i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
+ %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg2.mask.nxv2i64(<vscale x 2 x i64> %1,<vscale x 2 x i64> %1, i64* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
+ ret <vscale x 2 x i64> %3
+}
+
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg3.nxv2i64(i64* , i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg3.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i64> @test_vlseg3_nxv2i64(i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg3e64.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg3.nxv2i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
+ ret <vscale x 2 x i64> %1
+}
+
+define <vscale x 2 x i64> @test_vlseg3_mask_nxv2i64(i64* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg3e64.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vlseg3e64.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg3.nxv2i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
+ %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg3.mask.nxv2i64(<vscale x 2 x i64> %1,<vscale x 2 x i64> %1,<vscale x 2 x i64> %1, i64* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
+ ret <vscale x 2 x i64> %3
+}
+
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg4.nxv2i64(i64* , i64)
+declare {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg4.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x i64> @test_vlseg4_nxv2i64(i64* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg4e64.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg4.nxv2i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 1
+ ret <vscale x 2 x i64> %1
+}
+
+define <vscale x 2 x i64> @test_vlseg4_mask_nxv2i64(i64* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg4e64.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vlseg4e64.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg4.nxv2i64(i64* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %0, 0
+ %2 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg4.mask.nxv2i64(<vscale x 2 x i64> %1,<vscale x 2 x i64> %1,<vscale x 2 x i64> %1,<vscale x 2 x i64> %1, i64* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} %2, 1
+ ret <vscale x 2 x i64> %3
+}
+
+declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(half* , i64)
+declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, half*, <vscale x 16 x i1>, i64)
+
+define <vscale x 16 x half> @test_vlseg2_nxv16f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v12, (a0)
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
+ ret <vscale x 16 x half> %1
+}
+
+define <vscale x 16 x half> @test_vlseg2_mask_nxv16f16(half* %base, i64 %vl, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v12, (a0)
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vlseg2e16.v v12, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
+ %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.mask.nxv16f16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, half* %base, <vscale x 16 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
+ ret <vscale x 16 x half> %3
+}
+
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(double* , i64)
+declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, double*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x double> @test_vlseg2_nxv4f64(double* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vlseg2e64.v v12, (a0)
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
+ ret <vscale x 4 x double> %1
+}
+
+define <vscale x 4 x double> @test_vlseg2_mask_nxv4f64(double* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu
+; CHECK-NEXT: vlseg2e64.v v12, (a0)
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vlseg2e64.v v12, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
+ %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.mask.nxv4f64(<vscale x 4 x double> %1,<vscale x 4 x double> %1, double* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
+ ret <vscale x 4 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(double*, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlseg2_nxv1f64(double* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg2e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg2_mask_nxv1f64(double* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg2e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg2e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(double*, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlseg3_nxv1f64(double* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg3e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg3_mask_nxv1f64(double* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg3e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg3e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(double*, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlseg4_nxv1f64(double* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg4e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg4_mask_nxv1f64(double* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg4e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg4e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(double*, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlseg5_nxv1f64(double* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg5e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg5_mask_nxv1f64(double* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg5e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg5e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(double*, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlseg6_nxv1f64(double* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg6e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg6_mask_nxv1f64(double* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg6e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg6e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(double*, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlseg7_nxv1f64(double* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg7e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg7_mask_nxv1f64(double* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg7e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg7e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(double*, i64)
+declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, double*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x double> @test_vlseg8_nxv1f64(double* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg8e64.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
+ ret <vscale x 1 x double> %1
+}
+
+define <vscale x 1 x double> @test_vlseg8_mask_nxv1f64(double* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vlseg8e64.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vlseg8e64.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
+ %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, double* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
+ ret <vscale x 1 x double> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(float*, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlseg2_nxv2f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg2_mask_nxv2f32(float* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(float*, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlseg3_nxv2f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg3_mask_nxv2f32(float* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(float*, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlseg4_nxv2f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg4_mask_nxv2f32(float* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(float*, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlseg5_nxv2f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg5_mask_nxv2f32(float* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(float*, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlseg6_nxv2f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg6_mask_nxv2f32(float* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(float*, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlseg7_nxv2f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg7_mask_nxv2f32(float* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(float*, i64)
+declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, float*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x float> @test_vlseg8_nxv2f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
+ ret <vscale x 2 x float> %1
+}
+
+define <vscale x 2 x float> @test_vlseg8_mask_nxv2f32(float* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m1,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
+ %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, float* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
+ ret <vscale x 2 x float> %3
+}
+
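+; The fractional-LMUL tests (mf4, mf2) still allocate one whole vector
+; register per field, so they match the LMUL=1 register pattern and
+; differ only in the vtype passed to vsetvli (e.g. e16,mf4).
+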
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(half*, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlseg2_nxv1f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg2_mask_nxv1f16(half* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(half*, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlseg3_nxv1f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg3_mask_nxv1f16(half* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(half*, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlseg4_nxv1f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg4_mask_nxv1f16(half* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(half*, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlseg5_nxv1f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg5_mask_nxv1f16(half* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(half*, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlseg6_nxv1f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg6_mask_nxv1f16(half* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(half*, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlseg7_nxv1f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg7_mask_nxv1f16(half* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(half*, i64)
+declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, half*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x half> @test_vlseg8_nxv1f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
+ ret <vscale x 1 x half> %1
+}
+
+define <vscale x 1 x half> @test_vlseg8_mask_nxv1f16(half* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
+ %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, half* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
+ ret <vscale x 1 x half> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(float*, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlseg2_nxv1f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg2_mask_nxv1f32(float* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg2e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(float*, i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlseg3_nxv1f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg3_mask_nxv1f32(float* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg3e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(float* , i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlseg4_nxv1f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg4_mask_nxv1f32(float* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg4e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(float* , i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlseg5_nxv1f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg5_mask_nxv1f32(float* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg5e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(float* , i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlseg6_nxv1f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg6_mask_nxv1f32(float* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg6e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(float* , i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlseg7_nxv1f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg7_mask_nxv1f32(float* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg7e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(float* , i64)
+declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, float*, <vscale x 1 x i1>, i64)
+
+define <vscale x 1 x float> @test_vlseg8_nxv1f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
+ ret <vscale x 1 x float> %1
+}
+
+define <vscale x 1 x float> @test_vlseg8_mask_nxv1f32(float* %base, i64 %vl, <vscale x 1 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vlseg8e32.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
+ %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, float* %base, <vscale x 1 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
+ ret <vscale x 1 x float> %3
+}
+
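+; nxv8f16 needs LMUL=2 at SEW=16, so the segment tuples are built from
+; register groups two apart (v14m2_v16m2, ...) and the maskedoff copies use
+; vmv2r.v instead of vmv1r.v.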
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(half* , i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlseg2_nxv8f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+ ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlseg2_mask_nxv8f16(half* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vlseg2e16.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+ %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+ ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(half* , i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlseg3_nxv8f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+ ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlseg3_mask_nxv8f16(half* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vlseg3e16.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+ %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+ ret <vscale x 8 x half> %3
+}
+
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(half* , i64)
+declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, half*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x half> @test_vlseg4_nxv8f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
+ ret <vscale x 8 x half> %1
+}
+
+define <vscale x 8 x half> @test_vlseg4_mask_nxv8f16(half* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vlseg4e16.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
+ %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, half* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
+ ret <vscale x 8 x half> %3
+}
+
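+; nxv8f32 needs LMUL=4 at SEW=32; only the two-field form is tested here,
+; presumably because NF x LMUL may not exceed 8 vector registers, which
+; rules out vlseg3 and beyond at m4.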
+declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(float* , i64)
+declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, float*, <vscale x 8 x i1>, i64)
+
+define <vscale x 8 x float> @test_vlseg2_nxv8f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vlseg2e32.v v12, (a0)
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
+ ret <vscale x 8 x float> %1
+}
+
+define <vscale x 8 x float> @test_vlseg2_mask_nxv8f32(float* %base, i64 %vl, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m4,ta,mu
+; CHECK-NEXT: vlseg2e32.v v12, (a0)
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vlseg2e32.v v12, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m4 killed $v16m4 killed $v12m4_v16m4
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
+ %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.mask.nxv8f32(<vscale x 8 x float> %1,<vscale x 8 x float> %1, float* %base, <vscale x 8 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
+ ret <vscale x 8 x float> %3
+}
+
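+; nxv2f64 needs LMUL=2 at SEW=64; the same NF x LMUL <= 8 limit appears to
+; cap the tests at vlseg4, since four m2 groups already span eight vector
+; registers.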
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(double* , i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlseg2_nxv2f64(double* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg2e64.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+ ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlseg2_mask_nxv2f64(double* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg2e64.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vlseg2e64.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+ %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+ ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(double* , i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlseg3_nxv2f64(double* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg3e64.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+ ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlseg3_mask_nxv2f64(double* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg3e64.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vlseg3e64.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+ %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+ ret <vscale x 2 x double> %3
+}
+
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(double* , i64)
+declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, double*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x double> @test_vlseg4_nxv2f64(double* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg4e64.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
+ ret <vscale x 2 x double> %1
+}
+
+define <vscale x 2 x double> @test_vlseg4_mask_nxv2f64(double* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT: vlseg4e64.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vlseg4e64.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(double* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
+ %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, double* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
+ ret <vscale x 2 x double> %3
+}
+
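+; nxv4f16 fits in LMUL=1 at SEW=16, so the full NF=2..8 range is covered
+; with single-register tuples (v15..v22) and vmv1r.v copies.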
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(half* , i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlseg2_nxv4f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg2_mask_nxv4f16(half* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(half* , i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlseg3_nxv4f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg3_mask_nxv4f16(half* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(half* , i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlseg4_nxv4f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg4_mask_nxv4f16(half* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(half* , i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlseg5_nxv4f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg5_mask_nxv4f16(half* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(half* , i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlseg6_nxv4f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg6_mask_nxv4f16(half* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(half* , i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlseg7_nxv4f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg7_mask_nxv4f16(half* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(half* , i64)
+declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, half*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x half> @test_vlseg8_nxv4f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
+ ret <vscale x 4 x half> %1
+}
+
+define <vscale x 4 x half> @test_vlseg8_mask_nxv4f16(half* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,m1,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
+ %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, half* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
+ ret <vscale x 4 x half> %3
+}
+
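+; nxv2f16 uses a fractional LMUL (mf2) at SEW=16; a fractional group still
+; occupies one whole vector register, so the generated code matches the m1
+; case apart from the mf2 vtype.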
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg2_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg2_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg2e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg3_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg3_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg3e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg4_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg4_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg4e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg5_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg5_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg5_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg5_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg5e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg6_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg6_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg6_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg6_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg6e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg7_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg7_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg7_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg7_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg7e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(half* , i64)
+declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, half*, <vscale x 2 x i1>, i64)
+
+define <vscale x 2 x half> @test_vlseg8_nxv2f16(half* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg8_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
+ ret <vscale x 2 x half> %1
+}
+
+define <vscale x 2 x half> @test_vlseg8_mask_nxv2f16(half* %base, i64 %vl, <vscale x 2 x i1> %mask) {
+; CHECK-LABEL: test_vlseg8_mask_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0)
+; CHECK-NEXT: vmv1r.v v16, v15
+; CHECK-NEXT: vmv1r.v v17, v15
+; CHECK-NEXT: vmv1r.v v18, v15
+; CHECK-NEXT: vmv1r.v v19, v15
+; CHECK-NEXT: vmv1r.v v20, v15
+; CHECK-NEXT: vmv1r.v v21, v15
+; CHECK-NEXT: vmv1r.v v22, v15
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vlseg8e16.v v15, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16 killed $v16 killed $v15_v16_v17_v18_v19_v20_v21_v22
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(half* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
+ %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, half* %base, <vscale x 2 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
+ ret <vscale x 2 x half> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(float* , i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlseg2_nxv4f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg2_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+ ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlseg2_mask_nxv4f32(float* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg2_mask_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg2e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+ %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+ ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(float* , i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlseg3_nxv4f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg3_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+ ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlseg3_mask_nxv4f32(float* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg3_mask_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg3e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+ %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+ ret <vscale x 4 x float> %3
+}
+
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(float* , i64)
+declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, float*, <vscale x 4 x i1>, i64)
+
+define <vscale x 4 x float> @test_vlseg4_nxv4f32(float* %base, i64 %vl) {
+; CHECK-LABEL: test_vlseg4_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0)
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
+ ret <vscale x 4 x float> %1
+}
+
+define <vscale x 4 x float> @test_vlseg4_mask_nxv4f32(float* %base, i64 %vl, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: test_vlseg4_mask_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0)
+; CHECK-NEXT: vmv2r.v v16, v14
+; CHECK-NEXT: vmv2r.v v18, v14
+; CHECK-NEXT: vmv2r.v v20, v14
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vlseg4e32.v v14, (a0), v0.t
+; CHECK-NEXT: # kill: def $v16m2 killed $v16m2 killed $v14m2_v16m2_v18m2_v20m2
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(float* %base, i64 %vl)
+ %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
+ %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, float* %base, <vscale x 4 x i1> %mask, i64 %vl)
+ %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
+ ret <vscale x 4 x float> %3
+}
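+
+; The "# kill" lines show why no extract instruction is emitted: the loads
+; are allocated so that field 1 of the tuple already lands in v16/v16m2,
+; the vector return register here, leaving only a liveness annotation.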