[clang] b1b22b4 - [RISCV][Clang] Support policy functions for vneg, vnot, vncvt, vwcvt, vwcvtu, vfabs and vfneg.

Zakk Chen via cfe-commits cfe-commits at lists.llvm.org
Tue Aug 2 10:31:43 PDT 2022


Author: Zakk Chen
Date: 2022-08-02T17:27:55Z
New Revision: b1b22b4a85ac3593a41d9127b91985866d69cfd7

URL: https://github.com/llvm/llvm-project/commit/b1b22b4a85ac3593a41d9127b91985866d69cfd7
DIFF: https://github.com/llvm/llvm-project/commit/b1b22b4a85ac3593a41d9127b91985866d69cfd7.diff

LOG: [RISCV][Clang] Support policy functions for vneg, vnot, vncvt, vwcvt,
vwcvtu, vfabs and vfneg.

We will switch all UndefValue uses to PoisonValue in follow-up patches.

Reviewed By: kito-cheng

Differential Revision: https://reviews.llvm.org/D126744
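
Illustrative usage sketch (not taken verbatim from this commit; the wrapper
function names negate_keep_tail and negate_tail_agnostic are made up, but the
intrinsic names and argument orders match the new overloaded tests below):

#include <riscv_vector.h>
#include <stddef.h>

// Tail-undisturbed (_tu): elements past vl are taken from merge.
vint32mf2_t negate_keep_tail(vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
  return vneg_tu(merge, op1, vl);
}

// Tail-agnostic / mask-agnostic (_tama): no maskedoff operand is required;
// the passthru lowers to undef (later poison) in the generated IR.
vint32mf2_t negate_tail_agnostic(vbool64_t mask, vint32mf2_t op1, size_t vl) {
  return vneg_tama(mask, op1, vl);
}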

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c

Removed: 
    


################################################################################
diff  --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 2cfb185171e69..a36ff62231581 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1282,27 +1282,28 @@ multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> {
   let Name = NAME,
       IRName = IR,
       MaskedIRName = IR # "_mask",
+      UnMaskedPolicyScheme = HasPassthruOperand,
       ManualCodegen = [{
       {
-        // op1, vl
-        IntrinsicTypes = {ResultType,
-                          cast<llvm::VectorType>(ResultType)->getElementType(),
-                          Ops[1]->getType()};
-        Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[1]));
-        // insert undef passthru
-        Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+        if (DefaultPolicy == TAIL_AGNOSTIC)
+          Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+        auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
+        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
+        // passthru, op1, op2, vl
+        IntrinsicTypes = {ResultType, ElemTy, Ops[3]->getType()};
         break;
       }
       }],
       MaskedManualCodegen = [{
       {
         std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
-        Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED));
-        // maskedoff, op1, mask, vl
-        IntrinsicTypes = {ResultType,
-                          cast<llvm::VectorType>(ResultType)->getElementType(),
-                          Ops[3]->getType()};
-        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(IntrinsicTypes[1]));
+        if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC)
+          Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+        auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
+        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
+        Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
+        // maskedoff, op1, op2, mask, vl, policy
+        IntrinsicTypes = {ResultType, ElemTy, Ops[4]->getType()};
         break;
       }
       }] in {
@@ -1314,29 +1315,34 @@ multiclass RVVPseudoVNotBuiltin<string IR, string type_range> {
   let Name = NAME,
       IRName = IR,
       MaskedIRName = IR # "_mask",
+      UnMaskedPolicyScheme = HasPassthruOperand,
       ManualCodegen = [{
       {
-        // op1, vl
+        if (DefaultPolicy == TAIL_AGNOSTIC)
+          Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+        auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
+        Ops.insert(Ops.begin() + 2,
+                   llvm::Constant::getAllOnesValue(ElemTy));
+        // passthru, op1, op2, vl
         IntrinsicTypes = {ResultType,
-                          cast<llvm::VectorType>(ResultType)->getElementType(),
-                          Ops[1]->getType()};
-        Ops.insert(Ops.begin() + 1,
-                   llvm::Constant::getAllOnesValue(IntrinsicTypes[1]));
-        // insert undef passthru
-        Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+                          ElemTy,
+                          Ops[3]->getType()};
         break;
       }
       }],
       MaskedManualCodegen = [{
       {
         std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
-        Ops.push_back(ConstantInt::get(Ops.back()->getType(), TAIL_UNDISTURBED));
-        // maskedoff, op1, mask, vl
-        IntrinsicTypes = {ResultType,
-                          cast<llvm::VectorType>(ResultType)->getElementType(),
-                          Ops[3]->getType()};
+        if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC)
+          Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+        auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
         Ops.insert(Ops.begin() + 2,
-                   llvm::Constant::getAllOnesValue(IntrinsicTypes[1]));
+                   llvm::Constant::getAllOnesValue(ElemTy));
+        Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
+        // maskedoff, op1, op2, mask, vl, policy
+        IntrinsicTypes = {ResultType,
+                          ElemTy,
+                          Ops[4]->getType()};
         break;
       }
       }] in {
@@ -1366,28 +1372,29 @@ multiclass RVVPseudoVFUnaryBuiltin<string IR, string type_range> {
   let Name = NAME,
       IRName = IR,
       MaskedIRName = IR # "_mask",
+      UnMaskedPolicyScheme = HasPassthruOperand,
       ManualCodegen = [{
       {
-        // op1, vl
-        IntrinsicTypes = {ResultType,
-                          Ops[0]->getType(), Ops[1]->getType()};
-        Ops.insert(Ops.begin() + 1, Ops[0]);
         if (DefaultPolicy == TAIL_AGNOSTIC)
           Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+        // op1, op2, vl
+        IntrinsicTypes = {ResultType,
+                          Ops[1]->getType(), Ops[2]->getType()};
+        Ops.insert(Ops.begin() + 2, Ops[1]);
         break;
       }
       }],
       MaskedManualCodegen = [{
       {
         std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
-        Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
-        // maskedoff, op1, mask, vl
-        IntrinsicTypes = {ResultType,
-                          Ops[1]->getType(),
-                          Ops[3]->getType()};
-        Ops.insert(Ops.begin() + 2, Ops[1]);
         if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC)
           Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+        Ops.insert(Ops.begin() + 2, Ops[1]);
+        Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
+        // maskedoff, op1, op2, mask, vl, policy
+        IntrinsicTypes = {ResultType,
+                          Ops[2]->getType(),
+                          Ops.back()->getType()};
         break;
       }
       }] in {
@@ -1401,31 +1408,34 @@ multiclass RVVPseudoVWCVTBuiltin<string IR, string MName, string type_range,
       OverloadedName = MName,
       IRName = IR,
       MaskedIRName = IR # "_mask",
+      UnMaskedPolicyScheme = HasPassthruOperand,
       ManualCodegen = [{
       {
-        // op1, vl
-        IntrinsicTypes = {ResultType,
-                          Ops[0]->getType(),
-                          cast<llvm::VectorType>(Ops[0]->getType())->getElementType(),
-                          Ops[1]->getType()};
-        Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2]));
         if (DefaultPolicy == TAIL_AGNOSTIC)
           Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+        auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
+        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
+        // passthru, op1, op2, vl
+        IntrinsicTypes = {ResultType,
+                          Ops[1]->getType(),
+                          ElemTy,
+                          Ops[3]->getType()};
         break;
       }
       }],
       MaskedManualCodegen = [{
       {
         std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC)
+          Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+        auto ElemTy = cast<llvm::VectorType>(ResultType)->getElementType();
+        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(ElemTy));
         Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
-        // maskedoff, op1, mask, vl
+        // maskedoff, op1, op2, mask, vl, policy
         IntrinsicTypes = {ResultType,
                           Ops[1]->getType(),
-                          cast<llvm::VectorType>(Ops[1]->getType())->getElementType(),
-                          Ops[3]->getType()};
-        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(IntrinsicTypes[2]));
-        if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC)
-          Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+                          ElemTy,
+                          Ops[4]->getType()};
         break;
       }
       }] in {
@@ -1441,31 +1451,32 @@ multiclass RVVPseudoVNCVTBuiltin<string IR, string MName, string type_range,
       OverloadedName = MName,
       IRName = IR,
       MaskedIRName = IR # "_mask",
+      UnMaskedPolicyScheme = HasPassthruOperand,
       ManualCodegen = [{
       {
-        // op1, vl
-        IntrinsicTypes = {ResultType,
-                          Ops[0]->getType(),
-                          Ops[1]->getType(),
-                          Ops[1]->getType()};
-        Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2]));
         if (DefaultPolicy == TAIL_AGNOSTIC)
           Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(Ops.back()->getType()));
+        // passthru, op1, xlen, vl
+        IntrinsicTypes = {ResultType,
+                          Ops[1]->getType(),
+                          Ops[3]->getType(),
+                          Ops[3]->getType()};
         break;
       }
       }],
       MaskedManualCodegen = [{
       {
         std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC)
+          Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(Ops.back()->getType()));
         Ops.push_back(ConstantInt::get(Ops.back()->getType(), DefaultPolicy));
-        // maskedoff, op1, mask, vl
+        // maskedoff, op1, xlen, mask, vl, policy
         IntrinsicTypes = {ResultType,
                           Ops[1]->getType(),
-                          Ops[3]->getType(),
-                          Ops[3]->getType()};
-        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(IntrinsicTypes[2]));
-        if (DefaultPolicy == TAIL_AGNOSTIC_MASK_AGNOSTIC)
-          Ops.insert(Ops.begin(), llvm::UndefValue::get(ResultType));
+                          Ops[4]->getType(),
+                          Ops[4]->getType()};
         break;
       }
       }] in {

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
index 7d098fa95e125..f91e0e3497889 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfabs.c
@@ -167,3 +167,56 @@ vfloat64m8_t test_vfabs_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat
   return vfabs(mask, maskedoff, op1, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfabs_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfabs_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
+  return vfabs_ta(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfabs_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfabs_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfabs_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfabs_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
+  return vfabs_tama(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfabs_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfabs_tamu(mask, merge, op1, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
index c688b4c9ae1bc..fa33a4ef03e09 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfneg.c
@@ -167,3 +167,56 @@ vfloat64m8_t test_vfneg_v_f64m8_m (vbool8_t mask, vfloat64m8_t maskedoff, vfloat
   return vfneg(mask, maskedoff, op1, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfneg_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfneg_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
+  return vfneg_ta(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfneg_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfneg_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfneg_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfneg_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
+  return vfneg_tama(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfneg_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfneg_tamu(mask, merge, op1, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
index 104a5746c1e42..77bde642e43db 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vncvt.c
@@ -544,3 +544,110 @@ vuint32m4_t test_vncvt_x_x_w_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuin
   return vncvt_x(mask, maskedoff, src, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t merge, vint32mf2_t src, size_t vl) {
+  return vncvt_x_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t merge, vuint32mf2_t src, size_t vl) {
+  return vncvt_x_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vncvt_x_x_w_i16mf4_ta(vint32mf2_t src, size_t vl) {
+  return vncvt_x_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_ta(vuint32mf2_t src, size_t vl) {
+  return vncvt_x_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vncvt_x_x_w_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) {
+  return vncvt_x_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) {
+  return vncvt_x_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) {
+  return vncvt_x_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) {
+  return vncvt_x_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vncvt_x_x_w_i16mf4_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
+  return vncvt_x_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
+  return vncvt_x_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vncvt_x_x_w_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) {
+  return vncvt_x_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) {
+  return vncvt_x_tamu(mask, merge, src, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
index d6d3ea9188a31..71da8a10a5f99 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vneg.c
@@ -401,3 +401,56 @@ vint64m8_t test_vneg_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vneg(mask, maskedoff, op1, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vneg_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vneg_v_i32mf2_ta(vint32mf2_t op1, size_t vl) {
+  return vneg_ta(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vneg_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vneg_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vneg_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vneg_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) {
+  return vneg_tama(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vneg_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vneg_tamu(mask, merge, op1, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
index 832884af1700a..8d5459ab3dfd9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnot.c
@@ -796,3 +796,111 @@ vuint64m4_t test_vnot_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m
 vuint64m8_t test_vnot_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) {
   return vnot(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnot_v_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vnot_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
+  return vnot_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnot_v_i32mf2_ta(vint32mf2_t op1, size_t vl) {
+  return vnot_ta(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnot_v_u32mf2_ta(vuint32mf2_t op1, size_t vl) {
+  return vnot_ta(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnot_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vnot_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnot_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
+  return vnot_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vnot_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
+  return vnot_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnot_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) {
+  return vnot_tama(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnot_v_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t vl) {
+  return vnot_tama(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnot_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vnot_tamu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnot_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
+  return vnot_tamu(mask, merge, op1, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
index f431ea506d28c..92169803fbec7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwcvt.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
@@ -33,7 +33,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
@@ -42,7 +42,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
@@ -51,7 +51,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
@@ -60,7 +60,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
@@ -69,7 +69,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
@@ -78,7 +78,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
@@ -87,7 +87,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
@@ -96,7 +96,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
@@ -105,7 +105,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
@@ -114,7 +114,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
@@ -123,7 +123,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
@@ -132,7 +132,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
@@ -141,7 +141,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
@@ -150,7 +150,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
@@ -159,7 +159,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
@@ -168,7 +168,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
@@ -177,7 +177,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
@@ -186,7 +186,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
@@ -195,7 +195,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
@@ -204,7 +204,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
@@ -213,7 +213,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
@@ -222,7 +222,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
@@ -231,7 +231,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
@@ -240,7 +240,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
@@ -249,7 +249,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
@@ -258,7 +258,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
@@ -267,7 +267,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {
@@ -276,7 +276,7 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
@@ -285,7 +285,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vi
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
@@ -294,7 +294,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vi
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) {
@@ -303,7 +303,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) {
@@ -312,7 +312,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) {
@@ -321,7 +321,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i16 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) {
@@ -330,7 +330,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
@@ -339,7 +339,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
@@ -348,7 +348,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
@@ -357,7 +357,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) {
@@ -366,7 +366,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vui
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) {
@@ -375,7 +375,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vui
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i16 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) {
@@ -384,7 +384,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vui
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
@@ -393,7 +393,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vi
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) {
@@ -402,7 +402,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint1
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) {
@@ -411,7 +411,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint1
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) {
@@ -420,7 +420,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) {
@@ -429,7 +429,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
@@ -438,7 +438,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
@@ -447,7 +447,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) {
@@ -456,7 +456,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) {
@@ -465,7 +465,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vui
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) {
@@ -474,7 +474,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vui
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) {
@@ -483,7 +483,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint3
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) {
@@ -492,7 +492,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint3
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) {
@@ -501,7 +501,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint3
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) {
@@ -510,7 +510,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
@@ -519,7 +519,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwcvtu_x_x_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) {
@@ -528,7 +528,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) {
@@ -537,10 +537,117 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwcvtu_x_x_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) {
   return vwcvtu_x(mask, maskedoff, src, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t merge, vint32mf2_t src, size_t vl) {
+  return vwcvt_x_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t merge, vuint32mf2_t src, size_t vl) {
+  return vwcvtu_x_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwcvt_x_x_v_i64m1_ta(vint32mf2_t src, size_t vl) {
+  return vwcvt_x_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_ta(vuint32mf2_t src, size_t vl) {
+  return vwcvtu_x_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwcvt_x_x_v_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t src, size_t vl) {
+  return vwcvt_x_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t src, size_t vl) {
+  return vwcvtu_x_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t src, size_t vl) {
+  return vwcvt_x_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t src, size_t vl) {
+  return vwcvtu_x_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwcvt_x_x_v_i64m1_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
+  return vwcvt_x_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
+  return vwcvtu_x_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwcvt_x_x_v_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t src, size_t vl) {
+  return vwcvt_x_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t src, size_t vl) {
+  return vwcvtu_x_tamu(mask, merge, src, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
index 9a76058f4a0b3..7a5315f246db0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfabs.c
@@ -275,3 +275,57 @@ vfloat16m4_t test_vfabs_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat
 vfloat16m8_t test_vfabs_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
   return vfabs_v_f16m8_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfabs_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfabs_v_f32mf2_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfabs_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
+  return vfabs_v_f32mf2_ta(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfabs_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfabs_v_f32mf2_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfabs_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfabs_v_f32mf2_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfabs_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
+  return vfabs_v_f32mf2_tama(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfabs_v_f32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfabs_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfabs_v_f32mf2_tamu(mask, merge, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
index b809d6cf405ea..0f720e9769a36 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfneg.c
@@ -275,3 +275,57 @@ vfloat16m4_t test_vfneg_v_f16m4_m (vbool4_t mask, vfloat16m4_t maskedoff, vfloat
 vfloat16m8_t test_vfneg_v_f16m8_m (vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
   return vfneg_v_f16m8_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfneg_v_f32mf2_tu(vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfneg_v_f32mf2_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfneg_v_f32mf2_ta(vfloat32mf2_t op1, size_t vl) {
+  return vfneg_v_f32mf2_ta(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfneg_v_f32mf2_tuma(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfneg_v_f32mf2_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfneg_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfneg_v_f32mf2_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfneg_v_f32mf2_tama(vbool64_t mask, vfloat32mf2_t op1, size_t vl) {
+  return vfneg_v_f32mf2_tama(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vfneg_v_f32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP1]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfneg_v_f32mf2_tamu(vbool64_t mask, vfloat32mf2_t merge, vfloat32mf2_t op1, size_t vl) {
+  return vfneg_v_f32mf2_tamu(mask, merge, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
index 3b5dd626c29d5..e1126340ba296 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vncvt.c
@@ -544,3 +544,110 @@ vuint32m4_t test_vncvt_x_x_w_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuin
   return vncvt_x_x_w_u32m4_m(mask, maskedoff, src, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vncvt_x_x_w_i16mf4_tu(vint16mf4_t merge, vint32mf2_t src, size_t vl) {
+  return vncvt_x_x_w_i16mf4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_tu(vuint16mf4_t merge, vuint32mf2_t src, size_t vl) {
+  return vncvt_x_x_w_u16mf4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vncvt_x_x_w_i16mf4_ta(vint32mf2_t src, size_t vl) {
+  return vncvt_x_x_w_i16mf4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_ta(vuint32mf2_t src, size_t vl) {
+  return vncvt_x_x_w_u16mf4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vncvt_x_x_w_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) {
+  return vncvt_x_x_w_i16mf4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) {
+  return vncvt_x_x_w_u16mf4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vncvt_x_x_w_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) {
+  return vncvt_x_x_w_i16mf4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) {
+  return vncvt_x_x_w_u16mf4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vncvt_x_x_w_i16mf4_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
+  return vncvt_x_x_w_i16mf4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
+  return vncvt_x_x_w_u16mf4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_i16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vncvt_x_x_w_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vint32mf2_t src, size_t vl) {
+  return vncvt_x_x_w_i16mf4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vncvt_x_x_w_u16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnsrl.mask.nxv1i16.nxv1i32.i64.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vncvt_x_x_w_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vuint32mf2_t src, size_t vl) {
+  return vncvt_x_x_w_u16mf4_tamu(mask, merge, src, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
index 8501b4c2e1f06..1dd2cba3d9b0b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vneg.c
@@ -401,3 +401,56 @@ vint64m8_t test_vneg_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint64m8_t
   return vneg_v_i64m8_m(mask, maskedoff, op1, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vneg_v_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vneg_v_i32mf2_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vneg_v_i32mf2_ta(vint32mf2_t op1, size_t vl) {
+  return vneg_v_i32mf2_ta(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vneg_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vneg_v_i32mf2_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vneg_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vneg_v_i32mf2_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vneg_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) {
+  return vneg_v_i32mf2_tama(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vneg_v_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vneg_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vneg_v_i32mf2_tamu(mask, merge, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
index 44bca17f635c7..9f6f46dc60e0e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnot.c
@@ -796,3 +796,111 @@ vuint64m4_t test_vnot_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint64m
 vuint64m8_t test_vnot_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t vl) {
   return vnot_v_u64m8_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnot_v_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vnot_v_i32mf2_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnot_v_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
+  return vnot_v_u32mf2_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnot_v_i32mf2_ta(vint32mf2_t op1, size_t vl) {
+  return vnot_v_i32mf2_ta(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnot_v_u32mf2_ta(vuint32mf2_t op1, size_t vl) {
+  return vnot_v_u32mf2_ta(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnot_v_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vnot_v_i32mf2_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnot_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
+  return vnot_v_u32mf2_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnot_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vnot_v_i32mf2_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnot_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
+  return vnot_v_u32mf2_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnot_v_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, size_t vl) {
+  return vnot_v_i32mf2_tama(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnot_v_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, size_t vl) {
+  return vnot_v_u32mf2_tama(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vnot_v_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, size_t vl) {
+  return vnot_v_i32mf2_tamu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vnot_v_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vxor.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 -1, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vnot_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
+  return vnot_v_u32mf2_tamu(mask, merge, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
index 03d06da6b0b9b..ace27f1a6db43 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwcvt.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
@@ -15,7 +15,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
@@ -24,7 +24,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
@@ -33,7 +33,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
@@ -42,7 +42,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
@@ -51,7 +51,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
@@ -60,7 +60,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
@@ -69,7 +69,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
@@ -78,7 +78,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
@@ -87,7 +87,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
@@ -96,7 +96,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
@@ -105,7 +105,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
@@ -114,7 +114,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
@@ -123,7 +123,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
@@ -132,7 +132,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
@@ -141,7 +141,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
@@ -150,7 +150,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
@@ -159,7 +159,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
@@ -168,7 +168,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
@@ -177,7 +177,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
@@ -186,7 +186,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
@@ -195,7 +195,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
@@ -204,7 +204,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
@@ -213,7 +213,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
@@ -222,7 +222,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
@@ -231,7 +231,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
@@ -240,7 +240,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
@@ -249,7 +249,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
@@ -258,7 +258,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
@@ -267,7 +267,7 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {
@@ -276,7 +276,7 @@ vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
@@ -285,7 +285,7 @@ vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vi
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
@@ -294,7 +294,7 @@ vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vi
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) {
@@ -303,7 +303,7 @@ vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) {
@@ -312,7 +312,7 @@ vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) {
@@ -321,7 +321,7 @@ vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i16 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) {
@@ -330,7 +330,7 @@ vint16m8_t test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
@@ -339,7 +339,7 @@ vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
@@ -348,7 +348,7 @@ vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
@@ -357,7 +357,7 @@ vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) {
@@ -366,7 +366,7 @@ vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vui
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) {
@@ -375,7 +375,7 @@ vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vui
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i16 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) {
@@ -384,7 +384,7 @@ vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vui
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
@@ -393,7 +393,7 @@ vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vi
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) {
@@ -402,7 +402,7 @@ vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint1
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) {
@@ -411,7 +411,7 @@ vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint1
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) {
@@ -420,7 +420,7 @@ vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) {
@@ -429,7 +429,7 @@ vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
@@ -438,7 +438,7 @@ vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
@@ -447,7 +447,7 @@ vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) {
@@ -456,7 +456,7 @@ vuint32m2_t test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) {
@@ -465,7 +465,7 @@ vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vui
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i32 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) {
@@ -474,7 +474,7 @@ vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vui
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) {
@@ -483,7 +483,7 @@ vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint3
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) {
@@ -492,7 +492,7 @@ vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint3
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) {
@@ -501,7 +501,7 @@ vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint3
 
 // CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) {
@@ -510,7 +510,7 @@ vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
@@ -519,7 +519,7 @@ vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwcvtu_x_x_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) {
@@ -528,7 +528,7 @@ vuint64m2_t test_vwcvtu_x_x_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) {
@@ -537,10 +537,117 @@ vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vu
 
 // CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwcvtu_x_x_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) {
   return vwcvtu_x_x_v_u64m8_m(mask, maskedoff, src, vl);
 }
 
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwcvt_x_x_v_i64m1_tu(vint64m1_t merge, vint32mf2_t src, size_t vl) {
+  return vwcvt_x_x_v_i64m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_tu(vuint64m1_t merge, vuint32mf2_t src, size_t vl) {
+  return vwcvtu_x_x_v_u64m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwcvt_x_x_v_i64m1_ta(vint32mf2_t src, size_t vl) {
+  return vwcvt_x_x_v_i64m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_ta(vuint32mf2_t src, size_t vl) {
+  return vwcvtu_x_x_v_u64m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwcvt_x_x_v_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint32mf2_t src, size_t vl) {
+  return vwcvt_x_x_v_i64m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t src, size_t vl) {
+  return vwcvtu_x_x_v_u64m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwcvt_x_x_v_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint32mf2_t src, size_t vl) {
+  return vwcvt_x_x_v_i64m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t src, size_t vl) {
+  return vwcvtu_x_x_v_u64m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwcvt_x_x_v_i64m1_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
+  return vwcvt_x_x_v_i64m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
+  return vwcvtu_x_x_v_u64m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vwcvt_x_x_v_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint32mf2_t src, size_t vl) {
+  return vwcvt_x_x_v_i64m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vwcvtu_x_x_v_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint32mf2_t src, size_t vl) {
+  return vwcvtu_x_x_v_u64m1_tamu(mask, merge, src, vl);
+}

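For readers following the tests above, here is a minimal usage sketch (not part of the patch) of the policy-suffixed intrinsics this change exposes. It assumes a toolchain built with this revision and reuses the intrinsic names and signatures that appear verbatim in the new test functions (vwcvt_x_x_v_i64m1_tu, vwcvt_x_x_v_i64m1_tama); the wrapper function names are hypothetical.

#include <stddef.h>
#include <riscv_vector.h>

// Tail-undisturbed (_tu): elements past vl keep their values from `merge`,
// so the intrinsic takes an explicit merge operand.
vint64m1_t widen_tu(vint64m1_t merge, vint32mf2_t src, size_t vl) {
  return vwcvt_x_x_v_i64m1_tu(merge, src, vl);
}

// Tail-agnostic, mask-agnostic (_tama): no merge operand is needed; tail
// and inactive elements may hold any value, as the undef passthru in the
// CHECK lines above reflects.
vint64m1_t widen_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
  return vwcvt_x_x_v_i64m1_tama(mask, src, vl);
}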

        

