[clang] bb99d4b - [RISCV][Clang] Support policy functions for Vector Mask Instructions.

Zakk Chen via cfe-commits <cfe-commits at lists.llvm.org>
Tue Aug 2 10:31:51 PDT 2022


Author: Zakk Chen
Date: 2022-08-02T17:27:57Z
New Revision: bb99d4b11d84ae178d9ea3ea2c9da4d4c2e1eafc

URL: https://github.com/llvm/llvm-project/commit/bb99d4b11d84ae178d9ea3ea2c9da4d4c2e1eafc
DIFF: https://github.com/llvm/llvm-project/commit/bb99d4b11d84ae178d9ea3ea2c9da4d4c2e1eafc.diff

LOG: [RISCV][Clang] Support policy functions for Vector Mask Instructions.

We will switch all UndefValue to PoisonValue in follow-up patches.
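
For reference (not part of the patch), a minimal caller of two of the new policy variants, using the same intrinsic names and signatures that the tests below exercise; the helper names and the explicit includes are illustrative assumptions:

  #include <stddef.h>
  #include <riscv_vector.h>

  // Tail-undisturbed vid: elements past vl keep their values from `merge`.
  vuint32mf2_t example_vid_tu(vuint32mf2_t merge, size_t vl) {
    return vid_v_u32mf2_tu(merge, vl);
  }

  // Mask-undisturbed vmsbf: masked-off result elements keep their values
  // from `merge`.
  vbool4_t example_vmsbf_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1,
                            size_t vl) {
    return vmsbf_m_b4_mu(mask, merge, op1, vl);
  }

As the new tests suggest, the masked variants lower to intrinsics carrying an extra policy operand (0 for tumu, 1 for tamu, 2 for tuma, 3 for tama).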

Reviewed By: kito-cheng

Differential Revision: https://reviews.llvm.org/D126749

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index f724dee49aea..c20600e4f2a0 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2082,7 +2082,10 @@ def vcpop : RVVMaskOp0Builtin<"um">;
 
 // 16.3. vfirst find-first-set mask bit
 def vfirst : RVVMaskOp0Builtin<"lm">;
+}
 
+let MaskedPolicyScheme = HasPassthruOperand,
+    HasTailPolicy = false in {
 // 16.4. vmsbf.m set-before-first mask bit
 def vmsbf : RVVMaskUnaryBuiltin;
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
index ddf3bb2a12d2..7111f495513b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vid.c
@@ -219,3 +219,39 @@ vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                size_t vl) {
   return vid(mask, maskedoff, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t merge, size_t vl) {
+  return vid_tu(merge, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
+  return vid_tuma(mask, merge, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
+  return vid_tumu(mask, merge, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
+  return vid_tamu(mask, merge, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
index ff2acb39fd91..7cc7b1fb0a2b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/viota.c
@@ -6,7 +6,7 @@
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -16,7 +16,7 @@ vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -26,7 +26,7 @@ vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -36,7 +36,7 @@ vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -46,7 +46,7 @@ vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -56,7 +56,7 @@ vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -66,7 +66,7 @@ vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
@@ -76,7 +76,7 @@ vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -86,7 +86,7 @@ vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -96,7 +96,7 @@ vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -106,7 +106,7 @@ vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -116,7 +116,7 @@ vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -126,7 +126,7 @@ vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -136,7 +136,7 @@ vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -146,7 +146,7 @@ vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -156,7 +156,7 @@ vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -166,7 +166,7 @@ vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -176,7 +176,7 @@ vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -186,7 +186,7 @@ vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -196,7 +196,7 @@ vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -206,7 +206,7 @@ vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -216,10 +216,46 @@ vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                  vbool8_t op1, size_t vl) {
   return viota(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tu(vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_tamu(mask, merge, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
index 7012f0ef5b82..22e87261a9eb 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsbf.c
@@ -122,3 +122,21 @@ vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
                              size_t vl) {
   return vmsbf(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
+  return vmsbf_ma(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
+  return vmsbf_mu(mask, merge, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
index 5e16eb0a3497..cf492e0f9e4d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsif.c
@@ -122,3 +122,21 @@ vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
                              size_t vl) {
   return vmsif(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsif_m_b4_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
+  return vmsif_ma(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsif_m_b4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
+  return vmsif_mu(mask, merge, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
index 7330930687e6..463b0e6bc82f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmsof.c
@@ -122,3 +122,21 @@ vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
                              size_t vl) {
   return vmsof(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsof_m_b4_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
+  return vmsof_ma(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsof_m_b4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
+  return vmsof_mu(mask, merge, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
index 77992a6658b8..fbbf04792f1e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vid.c
@@ -373,3 +373,57 @@ vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                size_t vl) {
   return vid_v_u64m8_m(mask, maskedoff, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tu(vuint32mf2_t merge, size_t vl) {
+  return vid_v_u32mf2_tu(merge, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(<vscale x 1 x i32> undef, i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_ta(size_t vl) {
+  return vid_v_u32mf2_ta(vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
+  return vid_v_u32mf2_tuma(mask, merge, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
+  return vid_v_u32mf2_tumu(mask, merge, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tama(vbool64_t mask, size_t vl) {
+  return vid_v_u32mf2_tama(mask, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vid_v_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vid_v_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, size_t vl) {
+  return vid_v_u32mf2_tamu(mask, merge, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
index 98756f0ffe88..ea1cd1c7de95 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/viota.c
@@ -204,7 +204,7 @@ vuint64m8_t test_viota_m_u64m8(vbool8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
@@ -214,7 +214,7 @@ vuint8mf8_t test_viota_m_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
@@ -224,7 +224,7 @@ vuint8mf4_t test_viota_m_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
@@ -234,7 +234,7 @@ vuint8mf2_t test_viota_m_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
@@ -244,7 +244,7 @@ vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
@@ -254,7 +254,7 @@ vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
@@ -264,7 +264,7 @@ vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
@@ -274,7 +274,7 @@ vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
@@ -284,7 +284,7 @@ vuint16mf4_t test_viota_m_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
@@ -294,7 +294,7 @@ vuint16mf2_t test_viota_m_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
@@ -304,7 +304,7 @@ vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
@@ -314,7 +314,7 @@ vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
@@ -324,7 +324,7 @@ vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
@@ -334,7 +334,7 @@ vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@@ -344,7 +344,7 @@ vuint32mf2_t test_viota_m_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
@@ -354,7 +354,7 @@ vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
@@ -364,7 +364,7 @@ vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
@@ -374,7 +374,7 @@ vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
@@ -384,7 +384,7 @@ vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
@@ -394,7 +394,7 @@ vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
@@ -404,7 +404,7 @@ vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
@@ -414,10 +414,64 @@ vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_viota_m_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                  vbool8_t op1, size_t vl) {
   return viota_m_u64m8_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tu(vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_m_u32mf2_tu(merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_ta(vbool64_t op1, size_t vl) {
+  return viota_m_u32mf2_ta(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_m_u32mf2_tuma(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_m_u32mf2_tumu(mask, merge, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tama(vbool64_t mask, vbool64_t op1, size_t vl) {
+  return viota_m_u32mf2_tama(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_viota_m_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_viota_m_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vbool64_t op1, size_t vl) {
+  return viota_m_u32mf2_tamu(mask, merge, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
index 059e4fef4524..af2e9f88c6f9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsbf.c
@@ -136,3 +136,21 @@ vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
                              size_t vl) {
   return vmsbf_m_b64_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
+  return vmsbf_m_b4_ma(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsbf_m_b4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsbf_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
+  return vmsbf_m_b4_mu(mask, merge, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
index 8cbb7b282b1a..7165cd232e77 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsif.c
@@ -136,3 +136,21 @@ vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
                              size_t vl) {
   return vmsif_m_b64_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsif_m_b4_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
+  return vmsif_m_b4_ma(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsif_m_b4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsif_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
+  return vmsif_m_b4_mu(mask, merge, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
index 70b142302d43..ffabfdacbac8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmsof.c
@@ -136,3 +136,21 @@ vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,
                              size_t vl) {
   return vmsof_m_b64_m(mask, maskedoff, op1, vl);
 }
+
+// CHECK-RV64-LABEL: @test_vmsof_m_b4_ma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> undef, <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4_ma(vbool4_t mask, vbool4_t op1, size_t vl) {
+  return vmsof_m_b4_ma(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vmsof_m_b4_mu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MERGE:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmsof_m_b4_mu(vbool4_t mask, vbool4_t merge, vbool4_t op1, size_t vl) {
+  return vmsof_m_b4_mu(mask, merge, op1, vl);
+}


        

