[clang] 010f329 - [RISCV][Clang] Support policy functions for all vector segment loads.

Zakk Chen via cfe-commits cfe-commits at lists.llvm.org
Thu Aug 4 10:51:29 PDT 2022


Author: Zakk Chen
Date: 2022-08-04T17:47:24Z
New Revision: 010f329803c84e43ec15ffaff7b6e26b032cbcc6

URL: https://github.com/llvm/llvm-project/commit/010f329803c84e43ec15ffaff7b6e26b032cbcc6
DIFF: https://github.com/llvm/llvm-project/commit/010f329803c84e43ec15ffaff7b6e26b032cbcc6.diff

LOG: [RISCV][Clang] Support policy functions for all vector segment loads.

We will switch all UndefValue to PoisonValue in follow-up patches.

Reviewed By: kito-cheng

Differential Revision: https://reviews.llvm.org/D126750
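
The new variants follow the policy-suffix naming already used by the rest of
the RVV intrinsics: _tu/_ta for unmasked and _tuma/_tumu/_tama/_tamu for
masked segment loads. A minimal usage sketch, using the overloaded names
exercised by the tests below (the demo function itself is illustrative):

    #include <riscv_vector.h>

    // Tail-undisturbed: merge0/merge1 become the passthru operands of the
    // underlying llvm.riscv.vlsseg2 intrinsic call, so tail elements keep
    // their previous values.
    void demo_tu(vuint32mf2_t *v0, vuint32mf2_t *v1,
                 vuint32mf2_t merge0, vuint32mf2_t merge1,
                 const uint32_t *base, ptrdiff_t bstride, size_t vl) {
      vlsseg2e32_tu(v0, v1, merge0, merge1, base, bstride, vl);
    }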

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/lib/Support/RISCVVIntrinsicUtils.cpp
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask_mf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask_mf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask_mf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask_mf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mf.c

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index c20600e4f2a00..f303dfac04f72 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -843,22 +843,29 @@ multiclass RVVUnitStridedSegLoad<string op> {
             IRName = op # nf,
             MaskedIRName = op # nf # "_mask",
             NF = nf,
-            SupportOverloading = false,
             ManualCodegen = [{
     {
-      // builtin: (val0 address, val1 address, ..., ptr, vl)
       ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
-      IntrinsicTypes = {ResultType, Ops[NF + 1]->getType()};
+      // TA builtin: (val0 address, val1 address, ..., ptr, vl)
+      // TU builtin: (val0 address, ..., passthru0, ..., ptr, vl)
+      IntrinsicTypes = {ResultType, Ops.back()->getType()};
       // intrinsic: (passthru0, passthru1, ..., ptr, vl)
       SmallVector<llvm::Value*, 10> Operands;
-      for (unsigned I = 0; I < NF; ++I)
-        Operands.push_back(llvm::UndefValue::get(ResultType));
-      Operands.push_back(Ops[NF]);
-      Operands.push_back(Ops[NF + 1]);
+      if (DefaultPolicy == TAIL_AGNOSTIC) {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(llvm::UndefValue::get(ResultType));
+        Operands.push_back(Ops[NF]);
+        Operands.push_back(Ops[NF + 1]);
+      } else {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(Ops[NF + I]);
+        Operands.push_back(Ops[2 * NF]);
+        Operands.push_back(Ops[2 * NF + 1]);
+      }
       llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
       llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
       clang::CharUnits Align =
-          CGM.getNaturalPointeeTypeAlignment(E->getArg(NF)->getType());
+          CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
       llvm::Value *V;
       for (unsigned I = 0; I < NF; ++I) {
         llvm::Value *Val = Builder.CreateExtractValue(LoadValue, {I});
@@ -869,17 +876,26 @@ multiclass RVVUnitStridedSegLoad<string op> {
             }],
             MaskedManualCodegen = [{
     {
+      // TAMA builtin: (val0 address, ..., mask, ptr, vl)
       // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, vl)
       // intrinsic: (maskedoff0, ..., ptr, mask, vl)
-      IntrinsicTypes = {ConvertType(E->getArg(0)->getType()->getPointeeType()),
-                        Ops[2 * NF + 2]->getType()};
+      ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
+      IntrinsicTypes = {ResultType, Ops.back()->getType()};
       SmallVector<llvm::Value*, 12> Operands;
-      for (unsigned I = 0; I < NF; ++I)
-        Operands.push_back(Ops[NF + I + 1]);
-      Operands.push_back(Ops[2 * NF + 1]);
-      Operands.push_back(Ops[NF]);
-      Operands.push_back(Ops[2 * NF + 2]);
-      Operands.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED));
+      if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(llvm::UndefValue::get(ResultType));
+        Operands.push_back(Ops[NF + 1]);
+        Operands.push_back(Ops[NF]);
+        Operands.push_back(Ops[NF + 2]);
+      } else {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(Ops[NF + I + 1]);
+        Operands.push_back(Ops[2 * NF + 1]);
+        Operands.push_back(Ops[NF]);
+        Operands.push_back(Ops[2 * NF + 2]);
+      }
+      Operands.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
       assert(Operands.size() == NF + 4);
       llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
       llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
@@ -918,19 +934,28 @@ multiclass RVVUnitStridedSegLoadFF<string op> {
             IRName = op # nf # "ff",
             MaskedIRName = op # nf # "ff_mask",
             NF = nf,
-            SupportOverloading = false,
             ManualCodegen = [{
     {
-      // builtin: (val0 address, val1 address, ..., ptr, new_vl, vl)
+      // TA builtin: (val0 address, val1 address, ..., ptr, new_vl, vl)
+      // TU builtin: (val0 address, ..., passthru0, ..., ptr, new_vl, vl)
       ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
-      IntrinsicTypes = {ResultType, Ops[NF + 2]->getType()};
+      IntrinsicTypes = {ResultType, Ops.back()->getType()};
       // intrinsic: (passthru0, passthru1, ..., ptr, vl)
       SmallVector<llvm::Value*, 12> Operands;
-      for (unsigned I = 0; I < NF; ++I)
-        Operands.push_back(llvm::UndefValue::get(ResultType));
-      Operands.push_back(Ops[NF]);
-      Operands.push_back(Ops[NF + 2]);
-      Value *NewVL = Ops[NF + 1];
+      Value *NewVL;
+      if (DefaultPolicy == TAIL_AGNOSTIC) {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(llvm::UndefValue::get(ResultType));
+        Operands.push_back(Ops[NF]);
+        Operands.push_back(Ops[NF + 2]);
+        NewVL = Ops[NF + 1];
+      } else {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(Ops[NF + I]);
+        Operands.push_back(Ops[2 * NF]);
+        Operands.push_back(Ops[2 * NF + 2]);
+        NewVL = Ops[2 * NF + 1];
+      }
       llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
       llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
       clang::CharUnits Align =
@@ -946,18 +971,29 @@ multiclass RVVUnitStridedSegLoadFF<string op> {
             }],
             MaskedManualCodegen = [{
     {
+      // TAMA builtin: (val0 address, ..., mask, ptr, new_vl, vl)
       // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, new_vl, vl)
       // intrinsic: (maskedoff0, ..., ptr, mask, vl)
-      IntrinsicTypes = {ConvertType(E->getArg(0)->getType()->getPointeeType()),
-                        Ops[2 * NF + 3]->getType()};
+      ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
+      IntrinsicTypes = {ResultType, Ops.back()->getType()};
       SmallVector<llvm::Value*, 12> Operands;
-      for (unsigned I = 0; I < NF; ++I)
-        Operands.push_back(Ops[NF + I + 1]);
-      Operands.push_back(Ops[2 * NF + 1]);
-      Operands.push_back(Ops[NF]);
-      Operands.push_back(Ops[2 * NF + 3]);
-      Operands.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED));
-      Value *NewVL = Ops[2 * NF + 2];
+      Value *NewVL;
+      if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(llvm::UndefValue::get(ResultType));
+        Operands.push_back(Ops[NF + 1]);
+        Operands.push_back(Ops[NF]);
+        Operands.push_back(Ops[NF + 3]);
+        NewVL = Ops[NF + 2];
+      } else {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(Ops[NF + I + 1]);
+        Operands.push_back(Ops[2 * NF + 1]);
+        Operands.push_back(Ops[NF]);
+        Operands.push_back(Ops[2 * NF + 3]);
+        NewVL = Ops[2 * NF + 2];
+      }
+      Operands.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
       assert(Operands.size() == NF + 4);
       llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
       llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
@@ -997,19 +1033,27 @@ multiclass RVVStridedSegLoad<string op> {
             IRName = op # nf,
             MaskedIRName = op # nf # "_mask",
             NF = nf,
-            SupportOverloading = false,
             ManualCodegen = [{
     {
-      // builtin: (val0 address, val1 address, ..., ptr, stride, vl)
+      // TA builtin: (val0 address, val1 address, ..., ptr, stride, vl)
+      // TU builtin: (val0 address, ..., passthru0, ..., ptr, stride, vl)
       ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
-      IntrinsicTypes = {ResultType, Ops[NF + 2]->getType()};
+      IntrinsicTypes = {ResultType, Ops.back()->getType()};
       // intrinsic: (passthru0, passthru1, ..., ptr, stride, vl)
       SmallVector<llvm::Value*, 12> Operands;
-      for (unsigned I = 0; I < NF; ++I)
-        Operands.push_back(llvm::UndefValue::get(ResultType));
-      Operands.push_back(Ops[NF]);
-      Operands.push_back(Ops[NF + 1]);
-      Operands.push_back(Ops[NF + 2]);
+      if (DefaultPolicy == TAIL_AGNOSTIC) {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(llvm::UndefValue::get(ResultType));
+        Operands.push_back(Ops[NF]);
+        Operands.push_back(Ops[NF + 1]);
+        Operands.push_back(Ops[NF + 2]);
+      } else {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(Ops[NF + I]);
+        Operands.push_back(Ops[2 * NF]);
+        Operands.push_back(Ops[2 * NF + 1]);
+        Operands.push_back(Ops[2 * NF + 2]);
+      }
       llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
       llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
       clang::CharUnits Align =
@@ -1024,18 +1068,28 @@ multiclass RVVStridedSegLoad<string op> {
             }],
             MaskedManualCodegen = [{
     {
+      // TAMA builtin: (val0 address, ..., mask, ptr, stride, vl)
       // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, stride, vl)
       // intrinsic: (maskedoff0, ..., ptr, stride, mask, vl)
-      IntrinsicTypes = {ConvertType(E->getArg(0)->getType()->getPointeeType()),
-                        Ops[2 * NF + 3]->getType()};
+      ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
+      IntrinsicTypes = {ResultType, Ops.back()->getType()};
       SmallVector<llvm::Value*, 12> Operands;
-      for (unsigned I = 0; I < NF; ++I)
-        Operands.push_back(Ops[NF + I + 1]);
-      Operands.push_back(Ops[2 * NF + 1]);
-      Operands.push_back(Ops[2 * NF + 2]);
-      Operands.push_back(Ops[NF]);
-      Operands.push_back(Ops[2 * NF + 3]);
-      Operands.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED));
+      if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(llvm::UndefValue::get(ResultType));
+        Operands.push_back(Ops[NF + 1]);
+        Operands.push_back(Ops[NF + 2]);
+        Operands.push_back(Ops[NF]);
+        Operands.push_back(Ops[NF + 3]);
+      } else {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(Ops[NF + I + 1]);
+        Operands.push_back(Ops[2 * NF + 1]);
+        Operands.push_back(Ops[2 * NF + 2]);
+        Operands.push_back(Ops[NF]);
+        Operands.push_back(Ops[2 * NF + 3]);
+      }
+      Operands.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
       assert(Operands.size() == NF + 5);
       llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
       llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
@@ -1072,16 +1126,26 @@ multiclass RVVIndexedSegLoad<string op> {
             NF = nf,
             ManualCodegen = [{
     {
-      // builtin: (val0 address, val1 address, ..., ptr, index, vl)
+      // TA builtin: (val0 address, val1 address, ..., ptr, index, vl)
+      // TU builtin: (val0 address, ..., passthru0, ..., ptr, index, vl)
       ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
-      IntrinsicTypes = {ResultType, Ops[NF + 1]->getType(), Ops[NF + 2]->getType()};
       // intrinsic: (passthru0, passthru1, ..., ptr, index, vl)
       SmallVector<llvm::Value*, 12> Operands;
-      for (unsigned I = 0; I < NF; ++I)
-        Operands.push_back(llvm::UndefValue::get(ResultType));
-      Operands.push_back(Ops[NF]);
-      Operands.push_back(Ops[NF + 1]);
-      Operands.push_back(Ops[NF + 2]);
+      if (DefaultPolicy == TAIL_AGNOSTIC) {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(llvm::UndefValue::get(ResultType));
+        Operands.push_back(Ops[NF]);
+        Operands.push_back(Ops[NF + 1]);
+        Operands.push_back(Ops[NF + 2]);
+        IntrinsicTypes = {ResultType, Ops[NF + 1]->getType(), Ops.back()->getType()};
+      } else {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(Ops[NF + I]);
+        Operands.push_back(Ops[2 * NF]);
+        Operands.push_back(Ops[2 * NF + 1]);
+        Operands.push_back(Ops[2 * NF + 2]);
+        IntrinsicTypes = {ResultType, Ops[2 * NF + 1]->getType(), Ops.back()->getType()};
+      }
       llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
       llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
       clang::CharUnits Align =
@@ -1096,18 +1160,29 @@ multiclass RVVIndexedSegLoad<string op> {
             }],
             MaskedManualCodegen = [{
     {
+      // TAMA builtin: (val0 address, ..., mask, ptr, index, vl)
       // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, index, vl)
-      IntrinsicTypes = {ConvertType(E->getArg(0)->getType()->getPointeeType()),
-                        Ops[2 * NF + 2]->getType(), Ops[2 * NF + 3]->getType()};
+      ResultType = ConvertType(E->getArg(0)->getType()->getPointeeType());
       // intrinsic: (maskedoff0, ..., ptr, index, mask, vl)
       SmallVector<llvm::Value*, 12> Operands;
-      for (unsigned I = 0; I < NF; ++I)
-        Operands.push_back(Ops[NF + I + 1]);
-      Operands.push_back(Ops[2 * NF + 1]);
-      Operands.push_back(Ops[2 * NF + 2]);
-      Operands.push_back(Ops[NF]);
-      Operands.push_back(Ops[2 * NF + 3]);
-      Operands.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED));
+      if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC) {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(llvm::UndefValue::get(ResultType));
+        Operands.push_back(Ops[NF + 1]);
+        Operands.push_back(Ops[NF + 2]);
+        Operands.push_back(Ops[NF]);
+        Operands.push_back(Ops[NF + 3]);
+        IntrinsicTypes = {ResultType, Ops[NF + 2]->getType(), Ops.back()->getType()};
+      } else {
+        for (unsigned I = 0; I < NF; ++I)
+          Operands.push_back(Ops[NF + I + 1]);
+        Operands.push_back(Ops[2 * NF + 1]);
+        Operands.push_back(Ops[2 * NF + 2]);
+        Operands.push_back(Ops[NF]);
+        Operands.push_back(Ops[2 * NF + 3]);
+        IntrinsicTypes = {ResultType, Ops[2 * NF + 2]->getType(), Ops.back()->getType()};
+      }
+      Operands.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
       assert(Operands.size() == NF + 5);
       llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
       llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
@@ -1649,14 +1724,15 @@ defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>;
 defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
 
 // 7.8 Vector Load/Store Segment Instructions
-// TODO: Support policy function for segment load.
-let UnMaskedPolicyScheme = NonePolicy,
-    MaskedPolicyScheme = NonePolicy in {
+let UnMaskedPolicyScheme = HasPassthruOperand in {
 defm : RVVUnitStridedSegLoad<"vlseg">;
 defm : RVVUnitStridedSegLoadFF<"vlseg">;
 defm : RVVStridedSegLoad<"vlsseg">;
 defm : RVVIndexedSegLoad<"vluxseg">;
 defm : RVVIndexedSegLoad<"vloxseg">;
+}
+let UnMaskedPolicyScheme = NonePolicy,
+    MaskedPolicyScheme = NonePolicy in {
 defm : RVVUnitStridedSegStore<"vsseg">;
 defm : RVVStridedSegStore<"vssseg">;
 defm : RVVIndexedSegStore<"vsuxseg">;
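
The ManualCodegen changes above all follow the same pattern: under a
tail-agnostic DefaultPolicy the NF passthru operands are undef and the
operand indices are unchanged, while for the tail-undisturbed variants the
user-supplied merge values occupy Ops[NF..2*NF-1] and every later operand
shifts by NF. At the source level (a sketch; the demo function name is
illustrative, the intrinsic calls come from the tests in this patch):

    #include <riscv_vector.h>

    void demo_policy(vint32mf2_t *v0, vint32mf2_t *v1,
                     vint32mf2_t merge0, vint32mf2_t merge1,
                     const int32_t *base, vuint32mf2_t bindex, size_t vl) {
      // TA: lowers with undef passthru operands.
      vloxseg2ei32_ta(v0, v1, base, bindex, vl);
      // TU: merge0/merge1 are forwarded as the intrinsic's passthru
      // operands.
      vloxseg2ei32_tu(v0, v1, merge0, merge1, base, bindex, vl);
    }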

diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index f57d21bd5c295..87a89feb4b0d8 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -941,8 +941,8 @@ llvm::SmallVector<PrototypeDescriptor> RVVIntrinsic::computeBuiltinTypes(
   if (IsMasked) {
     // If HasMaskedOffOperand, insert result type as first input operand if
     // need.
-    if (HasMaskedOffOperand) {
-      if (NF == 1 && DefaultPolicy != Policy::TAMA) {
+    if (HasMaskedOffOperand && DefaultPolicy != Policy::TAMA) {
+      if (NF == 1) {
         NewPrototype.insert(NewPrototype.begin() + 1, NewPrototype[0]);
       } else if (NF > 1) {
         // Convert
@@ -971,22 +971,34 @@ llvm::SmallVector<PrototypeDescriptor> RVVIntrinsic::computeBuiltinTypes(
       // If IsMasked, insert PrototypeDescriptor:Mask as first input operand.
       NewPrototype.insert(NewPrototype.begin() + 1, PrototypeDescriptor::Mask);
     }
-  } else if (NF == 1) {
-    if (DefaultPolicy == Policy::TU && HasPassthruOp && !IsPrototypeDefaultTU)
-      NewPrototype.insert(NewPrototype.begin(), NewPrototype[0]);
-    else if (DefaultPolicy == Policy::TA && HasPassthruOp &&
-             IsPrototypeDefaultTU)
-      NewPrototype.erase(NewPrototype.begin() + 1);
-    if (DefaultScheme == PolicyScheme::HasPassthruOperandAtIdx1) {
-      if (DefaultPolicy == Policy::TU && !IsPrototypeDefaultTU) {
-        // Insert undisturbed output to index 1
-        NewPrototype.insert(NewPrototype.begin() + 2, NewPrototype[0]);
-      } else if (DefaultPolicy == Policy::TA && IsPrototypeDefaultTU) {
-        // Erase passthru for TA policy
-        NewPrototype.erase(NewPrototype.begin() + 2);
+  } else {
+    if (NF == 1) {
+      if (DefaultPolicy == Policy::TU && HasPassthruOp && !IsPrototypeDefaultTU)
+        NewPrototype.insert(NewPrototype.begin(), NewPrototype[0]);
+      else if (DefaultPolicy == Policy::TA && HasPassthruOp &&
+               IsPrototypeDefaultTU)
+        NewPrototype.erase(NewPrototype.begin() + 1);
+      if (DefaultScheme == PolicyScheme::HasPassthruOperandAtIdx1) {
+        if (DefaultPolicy == Policy::TU && !IsPrototypeDefaultTU) {
+          // Insert undisturbed output to index 1
+          NewPrototype.insert(NewPrototype.begin() + 2, NewPrototype[0]);
+        } else if (DefaultPolicy == Policy::TA && IsPrototypeDefaultTU) {
+          // Erase passthru for TA policy
+          NewPrototype.erase(NewPrototype.begin() + 2);
+        }
       }
+    } else if (DefaultPolicy == Policy::TU && HasPassthruOp) {
+      // NF > 1 cases for segment load operations.
+      // Convert
+      // (void, op0 address, op1 address, ...)
+      // to
+      // (void, op0 address, op1 address, maskedoff0, maskedoff1, ...)
+      PrototypeDescriptor MaskoffType = Prototype[1];
+      MaskoffType.TM &= ~static_cast<uint8_t>(TypeModifier::Pointer);
+      for (unsigned I = 0; I < NF; ++I)
+        NewPrototype.insert(NewPrototype.begin() + NF + 1, MaskoffType);
     }
-  }
+ }
 
   // If HasVL, append PrototypeDescriptor:VL to last operand
   if (HasVL)
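
For NF > 1 the tail-undisturbed builtin prototype gains one passthru
parameter per field, cloned from the segment element type (the Pointer
modifier is stripped from Prototype[1]) and inserted right after the NF
output pointers. For NF == 2 the generated prototypes therefore differ
roughly as follows (a sketch; parameter names are illustrative):

    // TA: no passthru parameters.
    void vlseg2e32_v_i32mf2_ta(vint32mf2_t *v0, vint32mf2_t *v1,
                               const int32_t *base, size_t vl);
    // TU: one maskedoff/merge parameter per field, placed after the
    // output pointers and before the pointer and vl operands.
    void vlseg2e32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1,
                               vint32mf2_t merge0, vint32mf2_t merge1,
                               const int32_t *base, size_t vl);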

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask_mf.c
index 960972b2ca317..5d86a43a0cb6f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask_mf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mask_mf.c
@@ -6922,3 +6922,55 @@ void test_vloxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
 void test_vloxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
   return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32mf2_tuma(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_tuma(v0, v1, mask, merge0, merge1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_tumu(v0, v1, mask, merge0, merge1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32mf2_tama(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_tama(v0, v1, mask, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32mf2_tamu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_tamu(v0, v1, mask, merge0, merge1, base, bindex, vl);
+}
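
Note how the trailing immediate in the CHECK lines encodes the policy that
the codegen now appends via ConstantInt::get(..., DefaultPolicy): tumu = 0,
tamu = 1, tuma = 2, tama = 3. A sketch of the mapping at the call sites
(the demo function is illustrative; signatures match the tests above):

    #include <riscv_vector.h>

    void demo_masked(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask,
                     vint32mf2_t merge0, vint32mf2_t merge1,
                     const int32_t *base, vuint32mf2_t bindex, size_t vl) {
      vloxseg2ei32_tumu(v0, v1, mask, merge0, merge1, base, bindex, vl); // 0
      vloxseg2ei32_tamu(v0, v1, mask, merge0, merge1, base, bindex, vl); // 1
      vloxseg2ei32_tuma(v0, v1, mask, merge0, merge1, base, bindex, vl); // 2
      vloxseg2ei32_tama(v0, v1, mask, base, bindex, vl);                 // 3
    }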

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mf.c
index 0194d81598726..8fd8a245b4198 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vloxseg_mf.c
@@ -6922,3 +6922,29 @@ void test_vloxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
 void test_vloxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
   return vloxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_tu(v0, v1, merge0, merge1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32mf2_ta(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_ta(v0, v1, base, bindex, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c
index 5e49294263f05..582cb1a40acc1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsegff.c
@@ -7290,3 +7290,159 @@ void test_vlseg4e64ff_v_f64m2_m (vfloat64m2_t *v0, vfloat64m2_t *v1, vfloat64m2_
 void test_vlseg2e64ff_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t mask, vfloat64m4_t maskedoff0, vfloat64m4_t maskedoff1, const double *base, size_t *new_vl, size_t vl) {
   return vlseg2e64ff(v0, v1, mask, maskedoff0, maskedoff1, base, new_vl, vl);
 }
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_tu(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_tu(v0, v1, merge0, merge1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_ta(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_ta(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_ta(v0, v1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_tuma(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 2)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_tuma(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_tuma(v0, v1, mask, merge0, merge1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_tumu(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 0)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_tumu(v0, v1, mask, merge0, merge1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_tama(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 3)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_tama(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_tama(v0, v1, mask, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_tamu(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]], i32 1)
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_tamu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_tamu(v0, v1, mask, merge0, merge1, base, new_vl, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c
index 5f36466a69b89..0971710dc740e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlsseg.c
@@ -3646,3 +3646,80 @@ void test_vlsseg2e64_v_f64m4_m (vfloat64m4_t *v0, vfloat64m4_t *v1, vbool16_t ma
   return vlsseg2e64(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_tu(v0, v1, merge0, merge1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_u32mf2_ta(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_ta(v0, v1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_u32mf2_tuma(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_tuma(v0, v1, mask, merge0, merge1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_tumu(v0, v1, mask, merge0, merge1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_u32mf2_tama(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_tama(v0, v1, mask, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_u32mf2_tamu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_tamu(v0, v1, mask, merge0, merge1, base, bstride, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask_mf.c
index 6001d7404b330..130a57374aa64 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask_mf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mask_mf.c
@@ -6922,3 +6922,55 @@ void test_vluxseg7ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat3
 void test_vluxseg8ei64_v_f32mf2_m (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, vbool64_t mask, vfloat32mf2_t maskedoff0, vfloat32mf2_t maskedoff1, vfloat32mf2_t maskedoff2, vfloat32mf2_t maskedoff3, vfloat32mf2_t maskedoff4, vfloat32mf2_t maskedoff5, vfloat32mf2_t maskedoff6, vfloat32mf2_t maskedoff7, const float *base, vuint64m1_t bindex, size_t vl) {
   return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i32mf2_tuma(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_tuma(v0, v1, mask, merge0, merge1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_tumu(v0, v1, mask, merge0, merge1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i32mf2_tama(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_tama(v0, v1, mask, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i32mf2_tamu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_tamu(v0, v1, mask, merge0, merge1, base, bindex, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mf.c
index cdc47077f4368..7c0e04bcfc84d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vluxseg_mf.c
@@ -6922,3 +6922,29 @@ void test_vluxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
 void test_vluxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
   return vluxseg8ei64(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_tu(v0, v1, merge0, merge1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i32mf2_ta(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_ta(v0, v1, base, bindex, vl);
+}
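
The unmasked policy pair works the same way: _tu takes one merge operand per field and the tail keeps those values, while _ta takes none and codegen substitutes undef passthrus, as the CHECK line above shows. A hedged sketch under the same assumptions:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

void load_pairs(vint32mf2_t *v0, vint32mf2_t *v1,
                vint32mf2_t old0, vint32mf2_t old1,
                const int32_t *base, vuint32mf2_t bindex, size_t vl) {
  /* Tail-undisturbed: elements past vl keep old0/old1, which become
   * the passthru operands of the IR intrinsic. */
  vluxseg2ei32_tu(v0, v1, old0, old1, base, bindex, vl);

  /* Tail-agnostic: no merge operands; the passthrus are undef. */
  vluxseg2ei32_ta(v0, v1, base, bindex, vl);
}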

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask_mf.c
index d8708bf991eaa..25ba4f2835f6f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask_mf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mask_mf.c
@@ -9051,3 +9051,54 @@ void test_vloxseg8ei64_v_f16mf2_m (vfloat16mf2_t *v0, vfloat16mf2_t *v1, vfloat1
   return vloxseg8ei64_v_f16mf2_m(v0, v1, v2, v3, v4, v5, v6, v7, mask, maskedoff0, maskedoff1, maskedoff2, maskedoff3, maskedoff4, maskedoff5, maskedoff6, maskedoff7, base, bindex, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32mf2_tuma(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i32mf2_tuma(v0, v1, mask, merge0, merge1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i32mf2_tumu(v0, v1, mask, merge0, merge1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32mf2_tama(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i32mf2_tama(v0, v1, mask, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32mf2_tamu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i32mf2_tamu(v0, v1, mask, merge0, merge1, base, bindex, vl);
+}
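
Reading the trailing i64 operand off the masked CHECK lines above gives the policy encoding: tumu = 0, tamu = 1, tuma = 2, tama = 3, i.e. bit 0 selects a tail-agnostic result and bit 1 a mask-agnostic one. As a crib for reviewers; the enumerator names below are illustrative, not Clang's:

/* Policy operand values as observed in this patch's CHECK lines:
 * bit 0 = tail agnostic, bit 1 = mask agnostic. */
enum rvv_policy {
  POLICY_TUMU = 0, /* tail undisturbed, mask undisturbed */
  POLICY_TAMU = 1, /* tail agnostic,    mask undisturbed */
  POLICY_TUMA = 2, /* tail undisturbed, mask agnostic    */
  POLICY_TAMA = 3  /* tail agnostic,    mask agnostic    */
};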

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mf.c
index 2be9695a08162..630bdc36a7fe1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vloxseg_mf.c
@@ -6922,3 +6922,29 @@ void test_vloxseg7ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
 void test_vloxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32mf2_t *v2, vfloat32mf2_t *v3, vfloat32mf2_t *v4, vfloat32mf2_t *v5, vfloat32mf2_t *v6, vfloat32mf2_t *v7, const float *base, vuint64m1_t bindex, size_t vl) {
   return vloxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i32mf2_tu(v0, v1, merge0, merge1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vloxseg2ei32_v_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vloxseg2.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vloxseg2ei32_v_i32mf2_ta(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vloxseg2ei32_v_i32mf2_ta(v0, v1, base, bindex, vl);
+}
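
vloxseg is the ordered-indexed counterpart of vluxseg: element accesses are performed in index order, which matters for accesses with side effects such as memory-mapped I/O. The policy surface is identical; a hedged sketch of the non-overloaded tu form, mirroring the test above:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Hedged sketch: ordered-indexed segment load, tail-undisturbed, so
 * elements past vl keep old0/old1. */
void ordered_gather_tu(vint32mf2_t *v0, vint32mf2_t *v1,
                       vint32mf2_t old0, vint32mf2_t old1,
                       const int32_t *base, vuint32mf2_t bindex, size_t vl) {
  vloxseg2ei32_v_i32mf2_tu(v0, v1, old0, old1, base, bindex, vl);
}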

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
index 6765acbc4f27f..38be5158a4a42 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsegff.c
@@ -8204,3 +8204,55 @@ void test_vlseg4e16ff_v_f16m2 (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
 void test_vlseg2e16ff_v_f16m4 (vfloat16m4_t *v0, vfloat16m4_t *v1, const _Float16 *base, size_t *new_vl, size_t vl) {
   return vlseg2e16ff_v_f16m4(v0, v1, base, new_vl, vl);
 }
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_tu(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_u32mf2_tu(v0, v1, merge0, merge1, base, new_vl, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vlseg2e32ff_v_u32mf2_ta(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } @llvm.riscv.vlseg2ff.nxv1i32.i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 0
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 1
+// CHECK-RV32-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV32-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i32 } [[TMP0]], 2
+// CHECK-RV32-NEXT:    store i32 [[TMP3]], i32* [[NEW_VL:%.*]], align 4
+// CHECK-RV32-NEXT:    ret void
+//
+// CHECK-RV64-LABEL: @test_vlseg2e32ff_v_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } @llvm.riscv.vlseg2ff.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP3:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32>, i64 } [[TMP0]], 2
+// CHECK-RV64-NEXT:    store i64 [[TMP3]], i64* [[NEW_VL:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlseg2e32ff_v_u32mf2_ta(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, size_t *new_vl, size_t vl) {
+  return vlseg2e32ff_v_u32mf2_ta(v0, v1, base, new_vl, vl);
+}
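
For the fault-only-first forms, the extra i32/i64 member of the result struct is the post-load vl, stored through new_vl, and the policy suffixes compose with it as with the other loads. A hedged sketch of the tu variant:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Hedged sketch: a fault past element 0 truncates vl instead of
 * trapping; new_vl receives the number of elements actually loaded.
 * Under tu, elements past the new vl keep merge0/merge1. */
void load_until_fault_tu(vuint32mf2_t *v0, vuint32mf2_t *v1,
                         vuint32mf2_t merge0, vuint32mf2_t merge1,
                         const uint32_t *base, size_t vl) {
  size_t new_vl;
  vlseg2e32ff_v_u32mf2_tu(v0, v1, merge0, merge1, base, &new_vl, vl);
}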

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c
index fc400e2f90709..7b25675f7ba2d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vlsseg.c
@@ -8198,3 +8198,81 @@ void test_vlsseg4e16_v_f16m2_m (vfloat16m2_t *v0, vfloat16m2_t *v1, vfloat16m2_t
 void test_vlsseg2e16_v_f16m4_m (vfloat16m4_t *v0, vfloat16m4_t *v1, vbool4_t mask, vfloat16m4_t maskedoff0, vfloat16m4_t maskedoff1, const _Float16 *base, ptrdiff_t bstride, size_t vl) {
   return vlsseg2e16_v_f16m4_m(v0, v1, mask, maskedoff0, maskedoff1, base, bstride, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_u32mf2_tu(vuint32mf2_t *v0, vuint32mf2_t *v1, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_v_u32mf2_tu(v0, v1, merge0, merge1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_u32mf2_ta(vuint32mf2_t *v0, vuint32mf2_t *v1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_v_u32mf2_ta(v0, v1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_u32mf2_tuma(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_v_u32mf2_tuma(v0, v1, mask, merge0, merge1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_u32mf2_tumu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_v_u32mf2_tumu(v0, v1, mask, merge0, merge1, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_u32mf2_tama(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_v_u32mf2_tama(v0, v1, mask, base, bstride, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vlsseg2e32_v_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vlsseg2.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], i64 [[BSTRIDE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vlsseg2e32_v_u32mf2_tamu(vuint32mf2_t *v0, vuint32mf2_t *v1, vbool64_t mask, vuint32mf2_t merge0, vuint32mf2_t merge1, const uint32_t *base, ptrdiff_t bstride, size_t vl) {
+  return vlsseg2e32_v_u32mf2_tamu(v0, v1, mask, merge0, merge1, base, bstride, vl);
+}
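
The strided forms take a byte stride (the ptrdiff_t bstride parameter), so a small-struct array can be deinterleaved while skipping records. A hedged sketch of the tama variant:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Hedged sketch: load the {x, y} pair of every other 8-byte record.
 * Each segment reads nf = 2 contiguous uint32_t starting at byte offset
 * i * bstride; masked-off and tail elements are agnostic under tama. */
void strided_pairs_tama(vuint32mf2_t *x, vuint32mf2_t *y, vbool64_t mask,
                        const uint32_t *base, size_t vl) {
  ptrdiff_t bstride = 2 * 2 * sizeof(uint32_t); /* 16 bytes */
  vlsseg2e32_v_u32mf2_tama(x, y, mask, base, bstride, vl);
}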

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask_mf.c
index 90996cc8480a1..e4380bce6e1cc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask_mf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mask_mf.c
@@ -9051,3 +9051,54 @@ void test_vluxseg2ei8_v_i8mf8_m (vint8mf8_t *v0, vint8mf8_t *v1, vbool64_t mask,
   return vluxseg2ei8_v_i8mf8_m(v0, v1, mask, maskedoff0, maskedoff1, base, bindex, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i32mf2_tuma(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i32mf2_tuma(v0, v1, mask, merge0, merge1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i32mf2_tumu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i32mf2_tumu(v0, v1, mask, merge0, merge1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i32mf2_tama(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i32mf2_tama(v0, v1, mask, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i32mf2_tamu(vint32mf2_t *v0, vint32mf2_t *v1, vbool64_t mask, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i32mf2_tamu(v0, v1, mask, merge0, merge1, base, bindex, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mf.c
index 91e3cc18623c1..d19d6773efa71 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vluxseg_mf.c
@@ -6923,3 +6923,28 @@ void test_vluxseg8ei64_v_f32mf2 (vfloat32mf2_t *v0, vfloat32mf2_t *v1, vfloat32m
   return vluxseg8ei64_v_f32mf2(v0, v1, v2, v3, v4, v5, v6, v7, base, bindex, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE0:%.*]], <vscale x 1 x i32> [[MERGE1:%.*]], i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i32mf2_tu(vint32mf2_t *v0, vint32mf2_t *v1, vint32mf2_t merge0, vint32mf2_t merge1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i32mf2_tu(v0, v1, merge0, merge1, base, bindex, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vluxseg2ei32_v_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 1 x i32>, <vscale x 1 x i32> } @llvm.riscv.vluxseg2.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, i32* [[BASE:%.*]], <vscale x 1 x i32> [[BINDEX:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP1:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 0
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP1]], <vscale x 1 x i32>* [[V0:%.*]], align 4
+// CHECK-RV64-NEXT:    [[TMP2:%.*]] = extractvalue { <vscale x 1 x i32>, <vscale x 1 x i32> } [[TMP0]], 1
+// CHECK-RV64-NEXT:    store <vscale x 1 x i32> [[TMP2]], <vscale x 1 x i32>* [[V1:%.*]], align 4
+// CHECK-RV64-NEXT:    ret void
+//
+void test_vluxseg2ei32_v_i32mf2_ta(vint32mf2_t *v0, vint32mf2_t *v1, const int32_t *base, vuint32mf2_t bindex, size_t vl) {
+  return vluxseg2ei32_v_i32mf2_ta(v0, v1, base, bindex, vl);
+}
