[clang] 08cf69c - [RISCV] Support overloading for RVV miscellaneous functions.

Zakk Chen via cfe-commits cfe-commits at lists.llvm.org
Tue Jul 13 21:35:51 PDT 2021


Author: Zakk Chen
Date: 2021-07-13T21:35:37-07:00
New Revision: 08cf69c31f849310ec45945d18f0feef4ea8f2e6

URL: https://github.com/llvm/llvm-project/commit/08cf69c31f849310ec45945d18f0feef4ea8f2e6
DIFF: https://github.com/llvm/llvm-project/commit/08cf69c31f849310ec45945d18f0feef4ea8f2e6.diff

LOG: [RISCV] Support overloading for RVV miscellaneous functions.

Based on this update to the intrinsic doc
https://github.com/riscv/rvv-intrinsic-doc/pull/103

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D105611
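
With this patch the vget/vset, vlmul_trunc/vlmul_ext and vreinterpret builtins
gain shorter, type-overloaded spellings alongside the fully suffixed names.
A minimal usage sketch, with the overloaded names taken from the tests added
below (the wrapper functions here are hypothetical and only for illustration):

    #include <riscv_vector.h>

    // Overloaded spellings introduced by this patch; the fully suffixed
    // forms (e.g. vget_v_i8m2_i8m1) remain available.
    vint8m1_t low_half(vint8m2_t src) {
      return vget_i8m1(src, 0);   // extract the first m1 part of an m2 value
    }

    vint8m4_t widen(vint8m1_t op1) {
      return vlmul_ext_i8m4(op1); // LMUL extension from m1 to m4
    }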

Added: 
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vreinterpret.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/utils/TableGen/RISCVVEmitter.cpp

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 495e5ef45218d..336f02cd9c25b 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -131,7 +131,7 @@
 // There are a number of attributes that are used to constrain the number and
 // shape of the builtins generated. Refer to the comments below for them.
 class RVVBuiltin<string suffix, string prototype, string type_range,
-                 string managed_suffix = ""> {
+                 string mangled_suffix = ""> {
   // Base name that will be prepended in __builtin_rvv_ and appended the
   // computed Suffix.
   string Name = NAME;
@@ -145,6 +145,10 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
   // It's used to describe some special naming cases.
   string MangledName = "";
 
+  // If not empty, each MangledName will have this appended after an
+  // underscore (_). It is instantiated like Prototype.
+  string MangledSuffix = mangled_suffix;
+
   // The different variants of the builtin, parameterised with a type.
   string TypeRange = type_range;
 
@@ -1711,29 +1715,30 @@ let HasMask = false,
 }
 
 // Miscellaneous
-let HasMask = false, HasVL = false, HasNoMaskedOverloaded = false,
-    IRName = "" in {
+let HasMask = false, HasVL = false, IRName = "" in {
   let Name = "vreinterpret_v",
       ManualCodegen = [{
         return Builder.CreateBitCast(Ops[0], ResultType);
       }] in {
     // Reinterpret between different type under the same SEW and LMUL
-    def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil">;
-    def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "il">;
-    def vreinterpret_u_i : RVVBuiltin<"vUv", "Uvv", "csil">;
-    def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "il">;
-    def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "il">;
-    def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "il">;
+    def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil", "v">;
+    def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "il", "v">;
+    def vreinterpret_u_i : RVVBuiltin<"vUv", "Uvv", "csil", "Uv">;
+    def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "il", "Uv">;
+    def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "il", "Fv">;
+    def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "il", "Fv">;
 
     // Reinterpret between different SEW under the same LMUL
     foreach dst_sew = ["(FixedSEW:8)", "(FixedSEW:16)", "(FixedSEW:32)",
                        "(FixedSEW:64)"] in {
-      def vreinterpret_i_ # dst_sew : RVVBuiltin<"v" # dst_sew # "v", dst_sew # "vv", "csil">;
-      def vreinterpret_u_ # dst_sew : RVVBuiltin<"Uv" # dst_sew # "Uv", dst_sew # "UvUv", "csil">;
+      def vreinterpret_i_ # dst_sew : RVVBuiltin<"v" # dst_sew # "v",
+                                                 dst_sew # "vv", "csil", dst_sew # "v">;
+      def vreinterpret_u_ # dst_sew : RVVBuiltin<"Uv" # dst_sew # "Uv",
+                                                 dst_sew # "UvUv", "csil", dst_sew # "Uv">;
     }
   }
 
-  let Name = "vundefined",
+  let Name = "vundefined", HasNoMaskedOverloaded = false,
       ManualCodegen = [{
         return llvm::UndefValue::get(ResultType);
       }] in {
@@ -1743,7 +1748,7 @@ let HasMask = false, HasVL = false, HasNoMaskedOverloaded = false,
 
   // LMUL truncation
   // C/C++ Operand: VecTy, IR Operand: VecTy, Index
-  let Name = "vlmul_trunc_v",
+  let Name = "vlmul_trunc_v", MangledName = "vlmul_trunc",
       ManualCodegen = [{ {
         ID = Intrinsic::experimental_vector_extract;
         IntrinsicTypes = {ResultType, Ops[0]->getType()};
@@ -1752,14 +1757,16 @@ let HasMask = false, HasVL = false, HasNoMaskedOverloaded = false,
       } }] in {
     foreach dst_lmul = ["(SFixedLog2LMUL:-3)", "(SFixedLog2LMUL:-2)", "(SFixedLog2LMUL:-1)",
                         "(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
-      def vlmul_trunc # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vv", "csilfd">;
-      def vlmul_trunc_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUv", "csil">;
+      def vlmul_trunc # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v",
+                                              dst_lmul # "vv", "csilfd", dst_lmul # "v">;
+      def vlmul_trunc_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv",
+                                                dst_lmul # "UvUv", "csil", dst_lmul # "Uv">;
     }
   }
 
   // LMUL extension
   // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index
-  let Name = "vlmul_ext_v",
+  let Name = "vlmul_ext_v", MangledName = "vlmul_ext",
       ManualCodegen = [{
         ID = Intrinsic::experimental_vector_insert;
         IntrinsicTypes = {ResultType, Ops[0]->getType()};
@@ -1770,8 +1777,10 @@ let HasMask = false, HasVL = false, HasNoMaskedOverloaded = false,
       }] in {
     foreach dst_lmul = ["(LFixedLog2LMUL:-2)", "(LFixedLog2LMUL:-1)", "(LFixedLog2LMUL:-0)",
                         "(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
-      def vlmul_ext # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vv", "csilfd">;
-      def vlmul_ext_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUv", "csil">;
+      def vlmul_ext # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v",
+                                            dst_lmul # "vv", "csilfd", dst_lmul # "v">;
+      def vlmul_ext_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv",
+                                              dst_lmul # "UvUv", "csil", dst_lmul # "Uv">;
     }
   }
 
@@ -1788,8 +1797,8 @@ let HasMask = false, HasVL = false, HasNoMaskedOverloaded = false,
       }
       }] in {
     foreach dst_lmul = ["(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
-      def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "csilfd">;
-      def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUvKz", "csil">;
+      def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "csilfd", dst_lmul # "v">;
+      def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUvKz", "csil", dst_lmul # "Uv">;
     }
   }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c
new file mode 100644
index 0000000000000..bf8d1e46b0353
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vget.c
@@ -0,0 +1,547 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vget_v_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vget_v_i8m2_i8m1(vint8m2_t src, size_t index) {
+  return vget_i8m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vget_v_i8m4_i8m1(vint8m4_t src, size_t index) {
+  return vget_i8m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vget_v_i8m8_i8m1(vint8m8_t src, size_t index) {
+  return vget_i8m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i8m4_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vget_v_i8m4_i8m2(vint8m4_t src, size_t index) {
+  return vget_i8m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vget_v_i8m8_i8m2(vint8m8_t src, size_t index) {
+  return vget_i8m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i8m8_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vget_v_i8m8_i8m4(vint8m8_t src, size_t index) {
+  return vget_i8m4(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vget_v_u8m2_u8m1(vuint8m2_t src, size_t index) {
+  return vget_u8m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vget_v_u8m4_u8m1(vuint8m4_t src, size_t index) {
+  return vget_u8m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vget_v_u8m8_u8m1(vuint8m8_t src, size_t index) {
+  return vget_u8m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u8m4_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vget_v_u8m4_u8m2(vuint8m4_t src, size_t index) {
+  return vget_u8m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vget_v_u8m8_u8m2(vuint8m8_t src, size_t index) {
+  return vget_u8m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u8m8_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vget_v_u8m8_u8m4(vuint8m8_t src, size_t index) {
+  return vget_u8m4(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vget_v_i16m2_i16m1(vint16m2_t src, size_t index) {
+  return vget_i16m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vget_v_i16m4_i16m1(vint16m4_t src, size_t index) {
+  return vget_i16m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vget_v_i16m8_i16m1(vint16m8_t src, size_t index) {
+  return vget_i16m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i16m4_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vget_v_i16m4_i16m2(vint16m4_t src, size_t index) {
+  return vget_i16m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vget_v_i16m8_i16m2(vint16m8_t src, size_t index) {
+  return vget_i16m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i16m8_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vget_v_i16m8_i16m4(vint16m8_t src, size_t index) {
+  return vget_i16m4(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vget_v_u16m2_u16m1(vuint16m2_t src, size_t index) {
+  return vget_u16m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vget_v_u16m4_u16m1(vuint16m4_t src, size_t index) {
+  return vget_u16m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vget_v_u16m8_u16m1(vuint16m8_t src, size_t index) {
+  return vget_u16m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u16m4_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vget_v_u16m4_u16m2(vuint16m4_t src, size_t index) {
+  return vget_u16m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vget_v_u16m8_u16m2(vuint16m8_t src, size_t index) {
+  return vget_u16m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u16m8_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vget_v_u16m8_u16m4(vuint16m8_t src, size_t index) {
+  return vget_u16m4(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vget_v_i32m2_i32m1(vint32m2_t src, size_t index) {
+  return vget_i32m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vget_v_i32m4_i32m1(vint32m4_t src, size_t index) {
+  return vget_i32m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vget_v_i32m8_i32m1(vint32m8_t src, size_t index) {
+  return vget_i32m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i32m4_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vget_v_i32m4_i32m2(vint32m4_t src, size_t index) {
+  return vget_i32m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vget_v_i32m8_i32m2(vint32m8_t src, size_t index) {
+  return vget_i32m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i32m8_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vget_v_i32m8_i32m4(vint32m8_t src, size_t index) {
+  return vget_i32m4(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vget_v_u32m2_u32m1(vuint32m2_t src, size_t index) {
+  return vget_u32m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vget_v_u32m4_u32m1(vuint32m4_t src, size_t index) {
+  return vget_u32m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vget_v_u32m8_u32m1(vuint32m8_t src, size_t index) {
+  return vget_u32m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u32m4_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vget_v_u32m4_u32m2(vuint32m4_t src, size_t index) {
+  return vget_u32m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vget_v_u32m8_u32m2(vuint32m8_t src, size_t index) {
+  return vget_u32m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u32m8_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vget_v_u32m8_u32m4(vuint32m8_t src, size_t index) {
+  return vget_u32m4(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_f32m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vget_v_f32m2_f32m1(vfloat32m2_t src, size_t index) {
+  return vget_f32m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vget_v_f32m4_f32m1(vfloat32m4_t src, size_t index) {
+  return vget_f32m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vget_v_f32m8_f32m1(vfloat32m8_t src, size_t index) {
+  return vget_f32m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_f32m4_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vget_v_f32m4_f32m2(vfloat32m4_t src, size_t index) {
+  return vget_f32m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vget_v_f32m8_f32m2(vfloat32m8_t src, size_t index) {
+  return vget_f32m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_f32m8_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vget_v_f32m8_f32m4(vfloat32m8_t src, size_t index) {
+  return vget_f32m4(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vget_v_i64m2_i64m1(vint64m2_t src, size_t index) {
+  return vget_i64m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vget_v_i64m4_i64m1(vint64m4_t src, size_t index) {
+  return vget_i64m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vget_v_i64m8_i64m1(vint64m8_t src, size_t index) {
+  return vget_i64m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i64m4_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vget_v_i64m4_i64m2(vint64m4_t src, size_t index) {
+  return vget_i64m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vget_v_i64m8_i64m2(vint64m8_t src, size_t index) {
+  return vget_i64m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_i64m8_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vget_v_i64m8_i64m4(vint64m8_t src, size_t index) {
+  return vget_i64m4(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vget_v_u64m2_u64m1(vuint64m2_t src, size_t index) {
+  return vget_u64m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vget_v_u64m4_u64m1(vuint64m4_t src, size_t index) {
+  return vget_u64m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vget_v_u64m8_u64m1(vuint64m8_t src, size_t index) {
+  return vget_u64m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u64m4_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vget_v_u64m4_u64m2(vuint64m4_t src, size_t index) {
+  return vget_u64m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vget_v_u64m8_u64m2(vuint64m8_t src, size_t index) {
+  return vget_u64m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_u64m8_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) {
+  return vget_u64m4(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_f64m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vget_v_f64m2_f64m1(vfloat64m2_t src, size_t index) {
+  return vget_f64m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vget_v_f64m4_f64m1(vfloat64m4_t src, size_t index) {
+  return vget_f64m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vget_v_f64m8_f64m1(vfloat64m8_t src, size_t index) {
+  return vget_f64m1(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_f64m4_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vget_v_f64m4_f64m2(vfloat64m4_t src, size_t index) {
+  return vget_f64m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vget_v_f64m8_f64m2(vfloat64m8_t src, size_t index) {
+  return vget_f64m2(src, 0);
+}
+
+// CHECK-RV64-LABEL: @test_vget_v_f64m8_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[SRC:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vget_v_f64m8_f64m4(vfloat64m8_t src, size_t index) {
+  return vget_f64m4(src, 0);
+}
+

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c
new file mode 100644
index 0000000000000..3d27463efbfa4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vlmul.c
@@ -0,0 +1,2166 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.nxv1i8(<vscale x 2 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vlmul_ext_v_i8mf8_i8mf4(vint8mf8_t op1) {
+  return vlmul_ext_i8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv1i8(<vscale x 4 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vlmul_ext_v_i8mf8_i8mf2(vint8mf8_t op1) {
+  return vlmul_ext_i8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv1i8(<vscale x 8 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vlmul_ext_v_i8mf8_i8m1(vint8mf8_t op1) {
+  return vlmul_ext_i8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv1i8(<vscale x 16 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vlmul_ext_v_i8mf8_i8m2(vint8mf8_t op1) {
+  return vlmul_ext_i8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv1i8(<vscale x 32 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vlmul_ext_v_i8mf8_i8m4(vint8mf8_t op1) {
+  return vlmul_ext_i8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf8_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv1i8(<vscale x 64 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vlmul_ext_v_i8mf8_i8m8(vint8mf8_t op1) {
+  return vlmul_ext_i8m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv2i8(<vscale x 4 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vlmul_ext_v_i8mf4_i8mf2(vint8mf4_t op1) {
+  return vlmul_ext_i8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv2i8(<vscale x 8 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vlmul_ext_v_i8mf4_i8m1(vint8mf4_t op1) {
+  return vlmul_ext_i8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv2i8(<vscale x 16 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vlmul_ext_v_i8mf4_i8m2(vint8mf4_t op1) {
+  return vlmul_ext_i8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv2i8(<vscale x 32 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vlmul_ext_v_i8mf4_i8m4(vint8mf4_t op1) {
+  return vlmul_ext_i8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf4_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv2i8(<vscale x 64 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vlmul_ext_v_i8mf4_i8m8(vint8mf4_t op1) {
+  return vlmul_ext_i8m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vlmul_ext_v_i8mf2_i8m1(vint8mf2_t op1) {
+  return vlmul_ext_i8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv4i8(<vscale x 16 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vlmul_ext_v_i8mf2_i8m2(vint8mf2_t op1) {
+  return vlmul_ext_i8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv4i8(<vscale x 32 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vlmul_ext_v_i8mf2_i8m4(vint8mf2_t op1) {
+  return vlmul_ext_i8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8mf2_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv4i8(<vscale x 64 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vlmul_ext_v_i8mf2_i8m8(vint8mf2_t op1) {
+  return vlmul_ext_i8m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vlmul_ext_v_i8m1_i8m2(vint8m1_t op1) {
+  return vlmul_ext_i8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vlmul_ext_v_i8m1_i8m4(vint8m1_t op1) {
+  return vlmul_ext_i8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m1_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vlmul_ext_v_i8m1_i8m8(vint8m1_t op1) {
+  return vlmul_ext_i8m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vlmul_ext_v_i8m2_i8m4(vint8m2_t op1) {
+  return vlmul_ext_i8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m2_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vlmul_ext_v_i8m2_i8m8(vint8m2_t op1) {
+  return vlmul_ext_i8m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i8m4_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vlmul_ext_v_i8m4_i8m8(vint8m4_t op1) {
+  return vlmul_ext_i8m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.insert.nxv2i16.nxv1i16(<vscale x 2 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vlmul_ext_v_i16mf4_i16mf2(vint16mf4_t op1) {
+  return vlmul_ext_i16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv1i16(<vscale x 4 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vlmul_ext_v_i16mf4_i16m1(vint16mf4_t op1) {
+  return vlmul_ext_i16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vlmul_ext_v_i16mf4_i16m2(vint16mf4_t op1) {
+  return vlmul_ext_i16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv1i16(<vscale x 16 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vlmul_ext_v_i16mf4_i16m4(vint16mf4_t op1) {
+  return vlmul_ext_i16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf4_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv1i16(<vscale x 32 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vlmul_ext_v_i16mf4_i16m8(vint16mf4_t op1) {
+  return vlmul_ext_i16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vlmul_ext_v_i16mf2_i16m1(vint16mf2_t op1) {
+  return vlmul_ext_i16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vlmul_ext_v_i16mf2_i16m2(vint16mf2_t op1) {
+  return vlmul_ext_i16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv2i16(<vscale x 16 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vlmul_ext_v_i16mf2_i16m4(vint16mf2_t op1) {
+  return vlmul_ext_i16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16mf2_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv2i16(<vscale x 32 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vlmul_ext_v_i16mf2_i16m8(vint16mf2_t op1) {
+  return vlmul_ext_i16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vlmul_ext_v_i16m1_i16m2(vint16m1_t op1) {
+  return vlmul_ext_i16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vlmul_ext_v_i16m1_i16m4(vint16m1_t op1) {
+  return vlmul_ext_i16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m1_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vlmul_ext_v_i16m1_i16m8(vint16m1_t op1) {
+  return vlmul_ext_i16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vlmul_ext_v_i16m2_i16m4(vint16m2_t op1) {
+  return vlmul_ext_i16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m2_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vlmul_ext_v_i16m2_i16m8(vint16m2_t op1) {
+  return vlmul_ext_i16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i16m4_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vlmul_ext_v_i16m4_i16m8(vint16m4_t op1) {
+  return vlmul_ext_i16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv1i32(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vlmul_ext_v_i32mf2_i32m1(vint32mf2_t op1) {
+  return vlmul_ext_i32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vlmul_ext_v_i32mf2_i32m2(vint32mf2_t op1) {
+  return vlmul_ext_i32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv1i32(<vscale x 8 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vlmul_ext_v_i32mf2_i32m4(vint32mf2_t op1) {
+  return vlmul_ext_i32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32mf2_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv1i32(<vscale x 16 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vlmul_ext_v_i32mf2_i32m8(vint32mf2_t op1) {
+  return vlmul_ext_i32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vlmul_ext_v_i32m1_i32m2(vint32m1_t op1) {
+  return vlmul_ext_i32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vlmul_ext_v_i32m1_i32m4(vint32m1_t op1) {
+  return vlmul_ext_i32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m1_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vlmul_ext_v_i32m1_i32m8(vint32m1_t op1) {
+  return vlmul_ext_i32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vlmul_ext_v_i32m2_i32m4(vint32m2_t op1) {
+  return vlmul_ext_i32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m2_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vlmul_ext_v_i32m2_i32m8(vint32m2_t op1) {
+  return vlmul_ext_i32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i32m4_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vlmul_ext_v_i32m4_i32m8(vint32m4_t op1) {
+  return vlmul_ext_i32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vlmul_ext_v_i64m1_i64m2(vint64m1_t op1) {
+  return vlmul_ext_i64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vlmul_ext_v_i64m1_i64m4(vint64m1_t op1) {
+  return vlmul_ext_i64m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m1_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vlmul_ext_v_i64m1_i64m8(vint64m1_t op1) {
+  return vlmul_ext_i64m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vlmul_ext_v_i64m2_i64m4(vint64m2_t op1) {
+  return vlmul_ext_i64m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m2_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vlmul_ext_v_i64m2_i64m8(vint64m2_t op1) {
+  return vlmul_ext_i64m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_i64m4_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vlmul_ext_v_i64m4_i64m8(vint64m4_t op1) {
+  return vlmul_ext_i64m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.insert.nxv2i8.nxv1i8(<vscale x 2 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vlmul_ext_v_u8mf8_u8mf4(vuint8mf8_t op1) {
+  return vlmul_ext_u8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv1i8(<vscale x 4 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vlmul_ext_v_u8mf8_u8mf2(vuint8mf8_t op1) {
+  return vlmul_ext_u8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv1i8(<vscale x 8 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vlmul_ext_v_u8mf8_u8m1(vuint8mf8_t op1) {
+  return vlmul_ext_u8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv1i8(<vscale x 16 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vlmul_ext_v_u8mf8_u8m2(vuint8mf8_t op1) {
+  return vlmul_ext_u8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv1i8(<vscale x 32 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vlmul_ext_v_u8mf8_u8m4(vuint8mf8_t op1) {
+  return vlmul_ext_u8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf8_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv1i8(<vscale x 64 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vlmul_ext_v_u8mf8_u8m8(vuint8mf8_t op1) {
+  return vlmul_ext_u8m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.insert.nxv4i8.nxv2i8(<vscale x 4 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vlmul_ext_v_u8mf4_u8mf2(vuint8mf4_t op1) {
+  return vlmul_ext_u8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv2i8(<vscale x 8 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vlmul_ext_v_u8mf4_u8m1(vuint8mf4_t op1) {
+  return vlmul_ext_u8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv2i8(<vscale x 16 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vlmul_ext_v_u8mf4_u8m2(vuint8mf4_t op1) {
+  return vlmul_ext_u8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv2i8(<vscale x 32 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vlmul_ext_v_u8mf4_u8m4(vuint8mf4_t op1) {
+  return vlmul_ext_u8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf4_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv2i8(<vscale x 64 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vlmul_ext_v_u8mf4_u8m8(vuint8mf4_t op1) {
+  return vlmul_ext_u8m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.insert.nxv8i8.nxv4i8(<vscale x 8 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vlmul_ext_v_u8mf2_u8m1(vuint8mf2_t op1) {
+  return vlmul_ext_u8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv4i8(<vscale x 16 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vlmul_ext_v_u8mf2_u8m2(vuint8mf2_t op1) {
+  return vlmul_ext_u8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv4i8(<vscale x 32 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vlmul_ext_v_u8mf2_u8m4(vuint8mf2_t op1) {
+  return vlmul_ext_u8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8mf2_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv4i8(<vscale x 64 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vlmul_ext_v_u8mf2_u8m8(vuint8mf2_t op1) {
+  return vlmul_ext_u8m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vlmul_ext_v_u8m1_u8m2(vuint8m1_t op1) {
+  return vlmul_ext_u8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vlmul_ext_v_u8m1_u8m4(vuint8m1_t op1) {
+  return vlmul_ext_u8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m1_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vlmul_ext_v_u8m1_u8m8(vuint8m1_t op1) {
+  return vlmul_ext_u8m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vlmul_ext_v_u8m2_u8m4(vuint8m2_t op1) {
+  return vlmul_ext_u8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m2_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vlmul_ext_v_u8m2_u8m8(vuint8m2_t op1) {
+  return vlmul_ext_u8m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u8m4_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vlmul_ext_v_u8m4_u8m8(vuint8m4_t op1) {
+  return vlmul_ext_u8m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.insert.nxv2i16.nxv1i16(<vscale x 2 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vlmul_ext_v_u16mf4_u16mf2(vuint16mf4_t op1) {
+  return vlmul_ext_u16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv1i16(<vscale x 4 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vlmul_ext_v_u16mf4_u16m1(vuint16mf4_t op1) {
+  return vlmul_ext_u16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vlmul_ext_v_u16mf4_u16m2(vuint16mf4_t op1) {
+  return vlmul_ext_u16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv1i16(<vscale x 16 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vlmul_ext_v_u16mf4_u16m4(vuint16mf4_t op1) {
+  return vlmul_ext_u16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf4_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv1i16(<vscale x 32 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vlmul_ext_v_u16mf4_u16m8(vuint16mf4_t op1) {
+  return vlmul_ext_u16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.insert.nxv4i16.nxv2i16(<vscale x 4 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vlmul_ext_v_u16mf2_u16m1(vuint16mf2_t op1) {
+  return vlmul_ext_u16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv2i16(<vscale x 8 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vlmul_ext_v_u16mf2_u16m2(vuint16mf2_t op1) {
+  return vlmul_ext_u16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv2i16(<vscale x 16 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vlmul_ext_v_u16mf2_u16m4(vuint16mf2_t op1) {
+  return vlmul_ext_u16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16mf2_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv2i16(<vscale x 32 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vlmul_ext_v_u16mf2_u16m8(vuint16mf2_t op1) {
+  return vlmul_ext_u16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vlmul_ext_v_u16m1_u16m2(vuint16m1_t op1) {
+  return vlmul_ext_u16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vlmul_ext_v_u16m1_u16m4(vuint16m1_t op1) {
+  return vlmul_ext_u16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m1_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vlmul_ext_v_u16m1_u16m8(vuint16m1_t op1) {
+  return vlmul_ext_u16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vlmul_ext_v_u16m2_u16m4(vuint16m2_t op1) {
+  return vlmul_ext_u16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m2_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vlmul_ext_v_u16m2_u16m8(vuint16m2_t op1) {
+  return vlmul_ext_u16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u16m4_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vlmul_ext_v_u16m4_u16m8(vuint16m4_t op1) {
+  return vlmul_ext_u16m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.insert.nxv2i32.nxv1i32(<vscale x 2 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vlmul_ext_v_u32mf2_u32m1(vuint32mf2_t op1) {
+  return vlmul_ext_u32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv1i32(<vscale x 4 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vlmul_ext_v_u32mf2_u32m2(vuint32mf2_t op1) {
+  return vlmul_ext_u32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv1i32(<vscale x 8 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vlmul_ext_v_u32mf2_u32m4(vuint32mf2_t op1) {
+  return vlmul_ext_u32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32mf2_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv1i32(<vscale x 16 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vlmul_ext_v_u32mf2_u32m8(vuint32mf2_t op1) {
+  return vlmul_ext_u32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vlmul_ext_v_u32m1_u32m2(vuint32m1_t op1) {
+  return vlmul_ext_u32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vlmul_ext_v_u32m1_u32m4(vuint32m1_t op1) {
+  return vlmul_ext_u32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m1_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vlmul_ext_v_u32m1_u32m8(vuint32m1_t op1) {
+  return vlmul_ext_u32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vlmul_ext_v_u32m2_u32m4(vuint32m2_t op1) {
+  return vlmul_ext_u32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m2_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vlmul_ext_v_u32m2_u32m8(vuint32m2_t op1) {
+  return vlmul_ext_u32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u32m4_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vlmul_ext_v_u32m4_u32m8(vuint32m4_t op1) {
+  return vlmul_ext_u32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vlmul_ext_v_u64m1_u64m2(vuint64m1_t op1) {
+  return vlmul_ext_u64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vlmul_ext_v_u64m1_u64m4(vuint64m1_t op1) {
+  return vlmul_ext_u64m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m1_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vlmul_ext_v_u64m1_u64m8(vuint64m1_t op1) {
+  return vlmul_ext_u64m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vlmul_ext_v_u64m2_u64m4(vuint64m2_t op1) {
+  return vlmul_ext_u64m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m2_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vlmul_ext_v_u64m2_u64m8(vuint64m2_t op1) {
+  return vlmul_ext_u64m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_u64m4_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vlmul_ext_v_u64m4_u64m8(vuint64m4_t op1) {
+  return vlmul_ext_u64m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.insert.nxv2f32.nxv1f32(<vscale x 2 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vlmul_ext_v_f32mf2_f32m1(vfloat32mf2_t op1) {
+  return vlmul_ext_f32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv1f32(<vscale x 4 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vlmul_ext_v_f32mf2_f32m2(vfloat32mf2_t op1) {
+  return vlmul_ext_f32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv1f32(<vscale x 8 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vlmul_ext_v_f32mf2_f32m4(vfloat32mf2_t op1) {
+  return vlmul_ext_f32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32mf2_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv1f32(<vscale x 16 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vlmul_ext_v_f32mf2_f32m8(vfloat32mf2_t op1) {
+  return vlmul_ext_f32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vlmul_ext_v_f32m1_f32m2(vfloat32m1_t op1) {
+  return vlmul_ext_f32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vlmul_ext_v_f32m1_f32m4(vfloat32m1_t op1) {
+  return vlmul_ext_f32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m1_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vlmul_ext_v_f32m1_f32m8(vfloat32m1_t op1) {
+  return vlmul_ext_f32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vlmul_ext_v_f32m2_f32m4(vfloat32m2_t op1) {
+  return vlmul_ext_f32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m2_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vlmul_ext_v_f32m2_f32m8(vfloat32m2_t op1) {
+  return vlmul_ext_f32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f32m4_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vlmul_ext_v_f32m4_f32m8(vfloat32m4_t op1) {
+  return vlmul_ext_f32m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vlmul_ext_v_f64m1_f64m2(vfloat64m1_t op1) {
+  return vlmul_ext_f64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vlmul_ext_v_f64m1_f64m4(vfloat64m1_t op1) {
+  return vlmul_ext_f64m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m1_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vlmul_ext_v_f64m1_f64m8(vfloat64m1_t op1) {
+  return vlmul_ext_f64m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vlmul_ext_v_f64m2_f64m4(vfloat64m2_t op1) {
+  return vlmul_ext_f64m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m2_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vlmul_ext_v_f64m2_f64m8(vfloat64m2_t op1) {
+  return vlmul_ext_f64m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_ext_v_f64m4_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vlmul_ext_v_f64m4_f64m8(vfloat64m4_t op1) {
+  return vlmul_ext_f64m8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf4_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv2i8(<vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8mf4_i8mf8(vint8mf4_t op1) {
+  return vlmul_trunc_i8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8mf2_i8mf8(vint8mf2_t op1) {
+  return vlmul_trunc_i8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8mf2_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8mf2_i8mf4(vint8mf2_t op1) {
+  return vlmul_trunc_i8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8m1_i8mf8(vint8m1_t op1) {
+  return vlmul_trunc_i8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8m1_i8mf4(vint8m1_t op1) {
+  return vlmul_trunc_i8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m1_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vlmul_trunc_v_i8m1_i8mf2(vint8m1_t op1) {
+  return vlmul_trunc_i8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8m2_i8mf8(vint8m2_t op1) {
+  return vlmul_trunc_i8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8m2_i8mf4(vint8m2_t op1) {
+  return vlmul_trunc_i8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vlmul_trunc_v_i8m2_i8mf2(vint8m2_t op1) {
+  return vlmul_trunc_i8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m2_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vlmul_trunc_v_i8m2_i8m1(vint8m2_t op1) {
+  return vlmul_trunc_i8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8m4_i8mf8(vint8m4_t op1) {
+  return vlmul_trunc_i8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8m4_i8mf4(vint8m4_t op1) {
+  return vlmul_trunc_i8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vlmul_trunc_v_i8m4_i8mf2(vint8m4_t op1) {
+  return vlmul_trunc_i8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vlmul_trunc_v_i8m4_i8m1(vint8m4_t op1) {
+  return vlmul_trunc_i8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m4_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vlmul_trunc_v_i8m4_i8m2(vint8m4_t op1) {
+  return vlmul_trunc_i8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vlmul_trunc_v_i8m8_i8mf8(vint8m8_t op1) {
+  return vlmul_trunc_i8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vlmul_trunc_v_i8m8_i8mf4(vint8m8_t op1) {
+  return vlmul_trunc_i8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vlmul_trunc_v_i8m8_i8mf2(vint8m8_t op1) {
+  return vlmul_trunc_i8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vlmul_trunc_v_i8m8_i8m1(vint8m8_t op1) {
+  return vlmul_trunc_i8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vlmul_trunc_v_i8m8_i8m2(vint8m8_t op1) {
+  return vlmul_trunc_i8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i8m8_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vlmul_trunc_v_i8m8_i8m4(vint8m8_t op1) {
+  return vlmul_trunc_i8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16mf2_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv2i16(<vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16mf2_i16mf4(vint16mf2_t op1) {
+  return vlmul_trunc_i16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16m1_i16mf4(vint16m1_t op1) {
+  return vlmul_trunc_i16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m1_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vlmul_trunc_v_i16m1_i16mf2(vint16m1_t op1) {
+  return vlmul_trunc_i16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16m2_i16mf4(vint16m2_t op1) {
+  return vlmul_trunc_i16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vlmul_trunc_v_i16m2_i16mf2(vint16m2_t op1) {
+  return vlmul_trunc_i16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vlmul_trunc_v_i16m2_i16m1(vint16m2_t op1) {
+  return vlmul_trunc_i16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16m4_i16mf4(vint16m4_t op1) {
+  return vlmul_trunc_i16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vlmul_trunc_v_i16m4_i16mf2(vint16m4_t op1) {
+  return vlmul_trunc_i16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vlmul_trunc_v_i16m4_i16m1(vint16m4_t op1) {
+  return vlmul_trunc_i16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m4_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vlmul_trunc_v_i16m4_i16m2(vint16m4_t op1) {
+  return vlmul_trunc_i16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vlmul_trunc_v_i16m8_i16mf4(vint16m8_t op1) {
+  return vlmul_trunc_i16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vlmul_trunc_v_i16m8_i16mf2(vint16m8_t op1) {
+  return vlmul_trunc_i16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vlmul_trunc_v_i16m8_i16m1(vint16m8_t op1) {
+  return vlmul_trunc_i16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vlmul_trunc_v_i16m8_i16m2(vint16m8_t op1) {
+  return vlmul_trunc_i16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i16m8_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vlmul_trunc_v_i16m8_i16m4(vint16m8_t op1) {
+  return vlmul_trunc_i16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m1_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vlmul_trunc_v_i32m1_i32mf2(vint32m1_t op1) {
+  return vlmul_trunc_i32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vlmul_trunc_v_i32m2_i32mf2(vint32m2_t op1) {
+  return vlmul_trunc_i32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vlmul_trunc_v_i32m2_i32m1(vint32m2_t op1) {
+  return vlmul_trunc_i32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vlmul_trunc_v_i32m4_i32mf2(vint32m4_t op1) {
+  return vlmul_trunc_i32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vlmul_trunc_v_i32m4_i32m1(vint32m4_t op1) {
+  return vlmul_trunc_i32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m4_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vlmul_trunc_v_i32m4_i32m2(vint32m4_t op1) {
+  return vlmul_trunc_i32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vlmul_trunc_v_i32m8_i32mf2(vint32m8_t op1) {
+  return vlmul_trunc_i32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vlmul_trunc_v_i32m8_i32m1(vint32m8_t op1) {
+  return vlmul_trunc_i32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vlmul_trunc_v_i32m8_i32m2(vint32m8_t op1) {
+  return vlmul_trunc_i32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i32m8_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vlmul_trunc_v_i32m8_i32m4(vint32m8_t op1) {
+  return vlmul_trunc_i32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vlmul_trunc_v_i64m2_i64m1(vint64m2_t op1) {
+  return vlmul_trunc_i64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vlmul_trunc_v_i64m4_i64m1(vint64m4_t op1) {
+  return vlmul_trunc_i64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m4_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vlmul_trunc_v_i64m4_i64m2(vint64m4_t op1) {
+  return vlmul_trunc_i64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vlmul_trunc_v_i64m8_i64m1(vint64m8_t op1) {
+  return vlmul_trunc_i64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vlmul_trunc_v_i64m8_i64m2(vint64m8_t op1) {
+  return vlmul_trunc_i64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_i64m8_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vlmul_trunc_v_i64m8_i64m4(vint64m8_t op1) {
+  return vlmul_trunc_i64m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf4_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv2i8(<vscale x 2 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8mf4_u8mf8(vuint8mf4_t op1) {
+  return vlmul_trunc_u8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8mf2_u8mf8(vuint8mf2_t op1) {
+  return vlmul_trunc_u8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8mf2_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv4i8(<vscale x 4 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8mf2_u8mf4(vuint8mf2_t op1) {
+  return vlmul_trunc_u8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8m1_u8mf8(vuint8m1_t op1) {
+  return vlmul_trunc_u8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8m1_u8mf4(vuint8m1_t op1) {
+  return vlmul_trunc_u8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m1_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv8i8(<vscale x 8 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vlmul_trunc_v_u8m1_u8mf2(vuint8m1_t op1) {
+  return vlmul_trunc_u8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8m2_u8mf8(vuint8m2_t op1) {
+  return vlmul_trunc_u8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8m2_u8mf4(vuint8m2_t op1) {
+  return vlmul_trunc_u8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vlmul_trunc_v_u8m2_u8mf2(vuint8m2_t op1) {
+  return vlmul_trunc_u8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m2_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vlmul_trunc_v_u8m2_u8m1(vuint8m2_t op1) {
+  return vlmul_trunc_u8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8m4_u8mf8(vuint8m4_t op1) {
+  return vlmul_trunc_u8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8m4_u8mf4(vuint8m4_t op1) {
+  return vlmul_trunc_u8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vlmul_trunc_v_u8m4_u8mf2(vuint8m4_t op1) {
+  return vlmul_trunc_u8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vlmul_trunc_v_u8m4_u8m1(vuint8m4_t op1) {
+  return vlmul_trunc_u8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m4_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vlmul_trunc_v_u8m4_u8m2(vuint8m4_t op1) {
+  return vlmul_trunc_u8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.experimental.vector.extract.nxv1i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vlmul_trunc_v_u8m8_u8mf8(vuint8m8_t op1) {
+  return vlmul_trunc_u8mf8(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vlmul_trunc_v_u8m8_u8mf4(vuint8m8_t op1) {
+  return vlmul_trunc_u8mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.experimental.vector.extract.nxv4i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vlmul_trunc_v_u8m8_u8mf2(vuint8m8_t op1) {
+  return vlmul_trunc_u8mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vector.extract.nxv8i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vlmul_trunc_v_u8m8_u8m1(vuint8m8_t op1) {
+  return vlmul_trunc_u8m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vlmul_trunc_v_u8m8_u8m2(vuint8m8_t op1) {
+  return vlmul_trunc_u8m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u8m8_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vlmul_trunc_v_u8m8_u8m4(vuint8m8_t op1) {
+  return vlmul_trunc_u8m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16mf2_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv2i16(<vscale x 2 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16mf2_u16mf4(vuint16mf2_t op1) {
+  return vlmul_trunc_u16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16m1_u16mf4(vuint16m1_t op1) {
+  return vlmul_trunc_u16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m1_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv4i16(<vscale x 4 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vlmul_trunc_v_u16m1_u16mf2(vuint16m1_t op1) {
+  return vlmul_trunc_u16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16m2_u16mf4(vuint16m2_t op1) {
+  return vlmul_trunc_u16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vlmul_trunc_v_u16m2_u16mf2(vuint16m2_t op1) {
+  return vlmul_trunc_u16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vlmul_trunc_v_u16m2_u16m1(vuint16m2_t op1) {
+  return vlmul_trunc_u16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16m4_u16mf4(vuint16m4_t op1) {
+  return vlmul_trunc_u16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vlmul_trunc_v_u16m4_u16mf2(vuint16m4_t op1) {
+  return vlmul_trunc_u16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vlmul_trunc_v_u16m4_u16m1(vuint16m4_t op1) {
+  return vlmul_trunc_u16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m4_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vlmul_trunc_v_u16m4_u16m2(vuint16m4_t op1) {
+  return vlmul_trunc_u16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vlmul_trunc_v_u16m8_u16mf4(vuint16m8_t op1) {
+  return vlmul_trunc_u16mf4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.experimental.vector.extract.nxv2i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vlmul_trunc_v_u16m8_u16mf2(vuint16m8_t op1) {
+  return vlmul_trunc_u16mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.experimental.vector.extract.nxv4i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vlmul_trunc_v_u16m8_u16m1(vuint16m8_t op1) {
+  return vlmul_trunc_u16m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vlmul_trunc_v_u16m8_u16m2(vuint16m8_t op1) {
+  return vlmul_trunc_u16m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u16m8_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.extract.nxv16i16.nxv32i16(<vscale x 32 x i16> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vlmul_trunc_v_u16m8_u16m4(vuint16m8_t op1) {
+  return vlmul_trunc_u16m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m1_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vlmul_trunc_v_u32m1_u32mf2(vuint32m1_t op1) {
+  return vlmul_trunc_u32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vlmul_trunc_v_u32m2_u32mf2(vuint32m2_t op1) {
+  return vlmul_trunc_u32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vlmul_trunc_v_u32m2_u32m1(vuint32m2_t op1) {
+  return vlmul_trunc_u32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vlmul_trunc_v_u32m4_u32mf2(vuint32m4_t op1) {
+  return vlmul_trunc_u32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vlmul_trunc_v_u32m4_u32m1(vuint32m4_t op1) {
+  return vlmul_trunc_u32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m4_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vlmul_trunc_v_u32m4_u32m2(vuint32m4_t op1) {
+  return vlmul_trunc_u32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vlmul_trunc_v_u32m8_u32mf2(vuint32m8_t op1) {
+  return vlmul_trunc_u32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vlmul_trunc_v_u32m8_u32m1(vuint32m8_t op1) {
+  return vlmul_trunc_u32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vlmul_trunc_v_u32m8_u32m2(vuint32m8_t op1) {
+  return vlmul_trunc_u32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u32m8_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vlmul_trunc_v_u32m8_u32m4(vuint32m8_t op1) {
+  return vlmul_trunc_u32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vlmul_trunc_v_u64m2_u64m1(vuint64m2_t op1) {
+  return vlmul_trunc_u64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vlmul_trunc_v_u64m4_u64m1(vuint64m4_t op1) {
+  return vlmul_trunc_u64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m4_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vlmul_trunc_v_u64m4_u64m2(vuint64m4_t op1) {
+  return vlmul_trunc_u64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.experimental.vector.extract.nxv1i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vlmul_trunc_v_u64m8_u64m1(vuint64m8_t op1) {
+  return vlmul_trunc_u64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vlmul_trunc_v_u64m8_u64m2(vuint64m8_t op1) {
+  return vlmul_trunc_u64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_u64m8_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.extract.nxv4i64.nxv8i64(<vscale x 8 x i64> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vlmul_trunc_v_u64m8_u64m4(vuint64m8_t op1) {
+  return vlmul_trunc_u64m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m1_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv2f32(<vscale x 2 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vlmul_trunc_v_f32m1_f32mf2(vfloat32m1_t op1) {
+  return vlmul_trunc_f32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv4f32(<vscale x 4 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vlmul_trunc_v_f32m2_f32mf2(vfloat32m2_t op1) {
+  return vlmul_trunc_f32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m2_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv4f32(<vscale x 4 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vlmul_trunc_v_f32m2_f32m1(vfloat32m2_t op1) {
+  return vlmul_trunc_f32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv8f32(<vscale x 8 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vlmul_trunc_v_f32m4_f32mf2(vfloat32m4_t op1) {
+  return vlmul_trunc_f32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vlmul_trunc_v_f32m4_f32m1(vfloat32m4_t op1) {
+  return vlmul_trunc_f32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m4_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vlmul_trunc_v_f32m4_f32m2(vfloat32m4_t op1) {
+  return vlmul_trunc_f32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.experimental.vector.extract.nxv1f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vlmul_trunc_v_f32m8_f32mf2(vfloat32m8_t op1) {
+  return vlmul_trunc_f32mf2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.experimental.vector.extract.nxv2f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vlmul_trunc_v_f32m8_f32m1(vfloat32m8_t op1) {
+  return vlmul_trunc_f32m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vlmul_trunc_v_f32m8_f32m2(vfloat32m8_t op1) {
+  return vlmul_trunc_f32m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f32m8_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.extract.nxv8f32.nxv16f32(<vscale x 16 x float> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vlmul_trunc_v_f32m8_f32m4(vfloat32m8_t op1) {
+  return vlmul_trunc_f32m4(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m2_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv2f64(<vscale x 2 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vlmul_trunc_v_f64m2_f64m1(vfloat64m2_t op1) {
+  return vlmul_trunc_f64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv4f64(<vscale x 4 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vlmul_trunc_v_f64m4_f64m1(vfloat64m4_t op1) {
+  return vlmul_trunc_f64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m4_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vlmul_trunc_v_f64m4_f64m2(vfloat64m4_t op1) {
+  return vlmul_trunc_f64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64.nxv8f64(<vscale x 8 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vlmul_trunc_v_f64m8_f64m1(vfloat64m8_t op1) {
+  return vlmul_trunc_f64m1(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vlmul_trunc_v_f64m8_f64m2(vfloat64m8_t op1) {
+  return vlmul_trunc_f64m2(op1);
+}
+
+// CHECK-RV64-LABEL: @test_vlmul_trunc_v_f64m8_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.extract.nxv4f64.nxv8f64(<vscale x 8 x double> [[OP1:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vlmul_trunc_v_f64m8_f64m4(vfloat64m8_t op1) {
+  return vlmul_trunc_f64m4(op1);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vreinterpret.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vreinterpret.c
new file mode 100644
index 0000000000000..760e697d9398a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vreinterpret.c
@@ -0,0 +1,1690 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf8_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[SRC:%.*]]
+//
+vuint8mf8_t test_vreinterpret_v_i8mf8_u8mf8(vint8mf8_t src) {
+  return vreinterpret_u8mf8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[SRC:%.*]]
+//
+vuint8mf4_t test_vreinterpret_v_i8mf4_u8mf4(vint8mf4_t src) {
+  return vreinterpret_u8mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[SRC:%.*]]
+//
+vuint8mf2_t test_vreinterpret_v_i8mf2_u8mf2(vint8mf2_t src) {
+  return vreinterpret_u8mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[SRC:%.*]]
+//
+vuint8m1_t test_vreinterpret_v_i8m1_u8m1(vint8m1_t src) {
+  return vreinterpret_u8m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[SRC:%.*]]
+//
+vuint8m2_t test_vreinterpret_v_i8m2_u8m2(vint8m2_t src) {
+  return vreinterpret_u8m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[SRC:%.*]]
+//
+vuint8m4_t test_vreinterpret_v_i8m4_u8m4(vint8m4_t src) {
+  return vreinterpret_u8m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[SRC:%.*]]
+//
+vuint8m8_t test_vreinterpret_v_i8m8_u8m8(vint8m8_t src) {
+  return vreinterpret_u8m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf8_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[SRC:%.*]]
+//
+vint8mf8_t test_vreinterpret_v_u8mf8_i8mf8(vuint8mf8_t src) {
+  return vreinterpret_i8mf8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[SRC:%.*]]
+//
+vint8mf4_t test_vreinterpret_v_u8mf4_i8mf4(vuint8mf4_t src) {
+  return vreinterpret_i8mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[SRC:%.*]]
+//
+vint8mf2_t test_vreinterpret_v_u8mf2_i8mf2(vuint8mf2_t src) {
+  return vreinterpret_i8mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[SRC:%.*]]
+//
+vint8m1_t test_vreinterpret_v_u8m1_i8m1(vuint8m1_t src) {
+  return vreinterpret_i8m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[SRC:%.*]]
+//
+vint8m2_t test_vreinterpret_v_u8m2_i8m2(vuint8m2_t src) {
+  return vreinterpret_i8m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[SRC:%.*]]
+//
+vint8m4_t test_vreinterpret_v_u8m4_i8m4(vuint8m4_t src) {
+  return vreinterpret_i8m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[SRC:%.*]]
+//
+vint8m8_t test_vreinterpret_v_u8m8_i8m8(vuint8m8_t src) {
+  return vreinterpret_i8m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[SRC:%.*]]
+//
+vuint16mf4_t test_vreinterpret_v_i16mf4_u16mf4(vint16mf4_t src) {
+  return vreinterpret_u16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[SRC:%.*]]
+//
+vuint16mf2_t test_vreinterpret_v_i16mf2_u16mf2(vint16mf2_t src) {
+  return vreinterpret_u16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[SRC:%.*]]
+//
+vuint16m1_t test_vreinterpret_v_i16m1_u16m1(vint16m1_t src) {
+  return vreinterpret_u16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[SRC:%.*]]
+//
+vuint16m2_t test_vreinterpret_v_i16m2_u16m2(vint16m2_t src) {
+  return vreinterpret_u16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[SRC:%.*]]
+//
+vuint16m4_t test_vreinterpret_v_i16m4_u16m4(vint16m4_t src) {
+  return vreinterpret_u16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[SRC:%.*]]
+//
+vuint16m8_t test_vreinterpret_v_i16m8_u16m8(vint16m8_t src) {
+  return vreinterpret_u16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[SRC:%.*]]
+//
+vint16mf4_t test_vreinterpret_v_u16mf4_i16mf4(vuint16mf4_t src) {
+  return vreinterpret_i16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[SRC:%.*]]
+//
+vint16mf2_t test_vreinterpret_v_u16mf2_i16mf2(vuint16mf2_t src) {
+  return vreinterpret_i16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[SRC:%.*]]
+//
+vint16m1_t test_vreinterpret_v_u16m1_i16m1(vuint16m1_t src) {
+  return vreinterpret_i16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[SRC:%.*]]
+//
+vint16m2_t test_vreinterpret_v_u16m2_i16m2(vuint16m2_t src) {
+  return vreinterpret_i16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[SRC:%.*]]
+//
+vint16m4_t test_vreinterpret_v_u16m4_i16m4(vuint16m4_t src) {
+  return vreinterpret_i16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[SRC:%.*]]
+//
+vint16m8_t test_vreinterpret_v_u16m8_i16m8(vuint16m8_t src) {
+  return vreinterpret_i16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[SRC:%.*]]
+//
+vuint32mf2_t test_vreinterpret_v_i32mf2_u32mf2(vint32mf2_t src) {
+  return vreinterpret_u32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[SRC:%.*]]
+//
+vuint32m1_t test_vreinterpret_v_i32m1_u32m1(vint32m1_t src) {
+  return vreinterpret_u32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[SRC:%.*]]
+//
+vuint32m2_t test_vreinterpret_v_i32m2_u32m2(vint32m2_t src) {
+  return vreinterpret_u32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[SRC:%.*]]
+//
+vuint32m4_t test_vreinterpret_v_i32m4_u32m4(vint32m4_t src) {
+  return vreinterpret_u32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[SRC:%.*]]
+//
+vuint32m8_t test_vreinterpret_v_i32m8_u32m8(vint32m8_t src) {
+  return vreinterpret_u32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[SRC:%.*]]
+//
+vint32mf2_t test_vreinterpret_v_u32mf2_i32mf2(vuint32mf2_t src) {
+  return vreinterpret_i32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[SRC:%.*]]
+//
+vint32m1_t test_vreinterpret_v_u32m1_i32m1(vuint32m1_t src) {
+  return vreinterpret_i32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[SRC:%.*]]
+//
+vint32m2_t test_vreinterpret_v_u32m2_i32m2(vuint32m2_t src) {
+  return vreinterpret_i32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[SRC:%.*]]
+//
+vint32m4_t test_vreinterpret_v_u32m4_i32m4(vuint32m4_t src) {
+  return vreinterpret_i32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[SRC:%.*]]
+//
+vint32m8_t test_vreinterpret_v_u32m8_i32m8(vuint32m8_t src) {
+  return vreinterpret_i32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x float> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vreinterpret_v_f32mf2_i32mf2(vfloat32mf2_t src) {
+  return vreinterpret_i32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x float> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vreinterpret_v_f32m1_i32m1(vfloat32m1_t src) {
+  return vreinterpret_i32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x float> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vreinterpret_v_f32m2_i32m2(vfloat32m2_t src) {
+  return vreinterpret_i32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x float> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vreinterpret_v_f32m4_i32m4(vfloat32m4_t src) {
+  return vreinterpret_i32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vreinterpret_v_f32m8_i32m8(vfloat32m8_t src) {
+  return vreinterpret_i32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32mf2_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x float> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vreinterpret_v_f32mf2_u32mf2(vfloat32mf2_t src) {
+  return vreinterpret_u32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x float> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vreinterpret_v_f32m1_u32m1(vfloat32m1_t src) {
+  return vreinterpret_u32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x float> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vreinterpret_v_f32m2_u32m2(vfloat32m2_t src) {
+  return vreinterpret_u32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m4_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x float> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vreinterpret_v_f32m4_u32m4(vfloat32m4_t src) {
+  return vreinterpret_u32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f32m8_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x float> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vreinterpret_v_f32m8_u32m8(vfloat32m8_t src) {
+  return vreinterpret_u32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 1 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vreinterpret_v_i32mf2_f32mf2(vint32mf2_t src) {
+  return vreinterpret_f32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 2 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vreinterpret_v_i32m1_f32m1(vint32m1_t src) {
+  return vreinterpret_f32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 4 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vreinterpret_v_i32m2_f32m2(vint32m2_t src) {
+  return vreinterpret_f32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 8 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vreinterpret_v_i32m4_f32m4(vint32m4_t src) {
+  return vreinterpret_f32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 16 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vreinterpret_v_i32m8_f32m8(vint32m8_t src) {
+  return vreinterpret_f32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_f32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 1 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vreinterpret_v_u32mf2_f32mf2(vuint32mf2_t src) {
+  return vreinterpret_f32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_f32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 2 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vreinterpret_v_u32m1_f32m1(vuint32m1_t src) {
+  return vreinterpret_f32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 4 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vreinterpret_v_u32m2_f32m2(vuint32m2_t src) {
+  return vreinterpret_f32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 8 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vreinterpret_v_u32m4_f32m4(vuint32m4_t src) {
+  return vreinterpret_f32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 16 x float>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vreinterpret_v_u32m8_f32m8(vuint32m8_t src) {
+  return vreinterpret_f32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[SRC:%.*]]
+//
+vuint64m1_t test_vreinterpret_v_i64m1_u64m1(vint64m1_t src) {
+  return vreinterpret_u64m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[SRC:%.*]]
+//
+vuint64m2_t test_vreinterpret_v_i64m2_u64m2(vint64m2_t src) {
+  return vreinterpret_u64m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[SRC:%.*]]
+//
+vuint64m4_t test_vreinterpret_v_i64m4_u64m4(vint64m4_t src) {
+  return vreinterpret_u64m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[SRC:%.*]]
+//
+vuint64m8_t test_vreinterpret_v_i64m8_u64m8(vint64m8_t src) {
+  return vreinterpret_u64m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[SRC:%.*]]
+//
+vint64m1_t test_vreinterpret_v_u64m1_i64m1(vuint64m1_t src) {
+  return vreinterpret_i64m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[SRC:%.*]]
+//
+vint64m2_t test_vreinterpret_v_u64m2_i64m2(vuint64m2_t src) {
+  return vreinterpret_i64m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[SRC:%.*]]
+//
+vint64m4_t test_vreinterpret_v_u64m4_i64m4(vuint64m4_t src) {
+  return vreinterpret_i64m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[SRC:%.*]]
+//
+vint64m8_t test_vreinterpret_v_u64m8_i64m8(vuint64m8_t src) {
+  return vreinterpret_i64m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x double> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vreinterpret_v_f64m1_i64m1(vfloat64m1_t src) {
+  return vreinterpret_i64m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m2_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x double> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vreinterpret_v_f64m2_i64m2(vfloat64m2_t src) {
+  return vreinterpret_i64m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x double> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vreinterpret_v_f64m4_i64m4(vfloat64m4_t src) {
+  return vreinterpret_i64m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vreinterpret_v_f64m8_i64m8(vfloat64m8_t src) {
+  return vreinterpret_i64m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x double> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vreinterpret_v_f64m1_u64m1(vfloat64m1_t src) {
+  return vreinterpret_u64m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m2_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x double> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vreinterpret_v_f64m2_u64m2(vfloat64m2_t src) {
+  return vreinterpret_u64m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m4_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x double> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vreinterpret_v_f64m4_u64m4(vfloat64m4_t src) {
+  return vreinterpret_u64m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_f64m8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x double> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vreinterpret_v_f64m8_u64m8(vfloat64m8_t src) {
+  return vreinterpret_u64m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 1 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vreinterpret_v_i64m1_f64m1(vint64m1_t src) {
+  return vreinterpret_f64m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 2 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vreinterpret_v_i64m2_f64m2(vint64m2_t src) {
+  return vreinterpret_f64m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 4 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vreinterpret_v_i64m4_f64m4(vint64m4_t src) {
+  return vreinterpret_f64m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 8 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vreinterpret_v_i64m8_f64m8(vint64m8_t src) {
+  return vreinterpret_f64m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_f64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 1 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vreinterpret_v_u64m1_f64m1(vuint64m1_t src) {
+  return vreinterpret_f64m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 2 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vreinterpret_v_u64m2_f64m2(vuint64m2_t src) {
+  return vreinterpret_f64m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 4 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vreinterpret_v_u64m4_f64m4(vuint64m4_t src) {
+  return vreinterpret_f64m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 8 x double>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vreinterpret_v_u64m8_f64m8(vuint64m8_t src) {
+  return vreinterpret_f64m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf4_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i8> [[SRC:%.*]] to <vscale x 1 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vreinterpret_v_i8mf4_i16mf4(vint8mf4_t src) {
+  return vreinterpret_i16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vreinterpret_v_i8mf2_i16mf2(vint8mf2_t src) {
+  return vreinterpret_i16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vreinterpret_v_i8m1_i16m1(vint8m1_t src) {
+  return vreinterpret_i16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vreinterpret_v_i8m2_i16m2(vint8m2_t src) {
+  return vreinterpret_i16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vreinterpret_v_i8m4_i16m4(vint8m4_t src) {
+  return vreinterpret_i16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vreinterpret_v_i8m8_i16m8(vint8m8_t src) {
+  return vreinterpret_i16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf4_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i8> [[SRC:%.*]] to <vscale x 1 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vreinterpret_v_u8mf4_u16mf4(vuint8mf4_t src) {
+  return vreinterpret_u16mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vreinterpret_v_u8mf2_u16mf2(vuint8mf2_t src) {
+  return vreinterpret_u16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vreinterpret_v_u8m1_u16m1(vuint8m1_t src) {
+  return vreinterpret_u16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vreinterpret_v_u8m2_u16m2(vuint8m2_t src) {
+  return vreinterpret_u16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vreinterpret_v_u8m4_u16m4(vuint8m4_t src) {
+  return vreinterpret_u16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vreinterpret_v_u8m8_u16m8(vuint8m8_t src) {
+  return vreinterpret_u16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8mf2_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vreinterpret_v_i8mf2_i32mf2(vint8mf2_t src) {
+  return vreinterpret_i32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vreinterpret_v_i8m1_i32m1(vint8m1_t src) {
+  return vreinterpret_i32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vreinterpret_v_i8m2_i32m2(vint8m2_t src) {
+  return vreinterpret_i32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vreinterpret_v_i8m4_i32m4(vint8m4_t src) {
+  return vreinterpret_i32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vreinterpret_v_i8m8_i32m8(vint8m8_t src) {
+  return vreinterpret_i32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8mf2_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i8> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vreinterpret_v_u8mf2_u32mf2(vuint8mf2_t src) {
+  return vreinterpret_u32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vreinterpret_v_u8m1_u32m1(vuint8m1_t src) {
+  return vreinterpret_u32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vreinterpret_v_u8m2_u32m2(vuint8m2_t src) {
+  return vreinterpret_u32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vreinterpret_v_u8m4_u32m4(vuint8m4_t src) {
+  return vreinterpret_u32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vreinterpret_v_u8m8_u32m8(vuint8m8_t src) {
+  return vreinterpret_u32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vreinterpret_v_i8m1_i64m1(vint8m1_t src) {
+  return vreinterpret_i64m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m2_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vreinterpret_v_i8m2_i64m2(vint8m2_t src) {
+  return vreinterpret_i64m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m4_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vreinterpret_v_i8m4_i64m4(vint8m4_t src) {
+  return vreinterpret_i64m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i8m8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vreinterpret_v_i8m8_i64m8(vint8m8_t src) {
+  return vreinterpret_i64m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i8> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vreinterpret_v_u8m1_u64m1(vuint8m1_t src) {
+  return vreinterpret_u64m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m2_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i8> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vreinterpret_v_u8m2_u64m2(vuint8m2_t src) {
+  return vreinterpret_u64m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m4_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i8> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vreinterpret_v_u8m4_u64m4(vuint8m4_t src) {
+  return vreinterpret_u64m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u8m8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 64 x i8> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vreinterpret_v_u8m8_u64m8(vuint8m8_t src) {
+  return vreinterpret_u64m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf4_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i16> [[SRC:%.*]] to <vscale x 2 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vreinterpret_v_i16mf4_i8mf4(vint16mf4_t src) {
+  return vreinterpret_i8mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 4 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vreinterpret_v_i16mf2_i8mf2(vint16mf2_t src) {
+  return vreinterpret_i8mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vreinterpret_v_i16m1_i8m1(vint16m1_t src) {
+  return vreinterpret_i8m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vreinterpret_v_i16m2_i8m2(vint16m2_t src) {
+  return vreinterpret_i8m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vreinterpret_v_i16m4_i8m4(vint16m4_t src) {
+  return vreinterpret_i8m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vreinterpret_v_i16m8_i8m8(vint16m8_t src) {
+  return vreinterpret_i8m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf4_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i16> [[SRC:%.*]] to <vscale x 2 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vreinterpret_v_u16mf4_u8mf4(vuint16mf4_t src) {
+  return vreinterpret_u8mf4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 4 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vreinterpret_v_u16mf2_u8mf2(vuint16mf2_t src) {
+  return vreinterpret_u8mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vreinterpret_v_u16m1_u8m1(vuint16m1_t src) {
+  return vreinterpret_u8m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vreinterpret_v_u16m2_u8m2(vuint16m2_t src) {
+  return vreinterpret_u8m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vreinterpret_v_u16m4_u8m4(vuint16m4_t src) {
+  return vreinterpret_u8m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vreinterpret_v_u16m8_u8m8(vuint16m8_t src) {
+  return vreinterpret_u8m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16mf2_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vreinterpret_v_i16mf2_i32mf2(vint16mf2_t src) {
+  return vreinterpret_i32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vreinterpret_v_i16m1_i32m1(vint16m1_t src) {
+  return vreinterpret_i32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vreinterpret_v_i16m2_i32m2(vint16m2_t src) {
+  return vreinterpret_i32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vreinterpret_v_i16m4_i32m4(vint16m4_t src) {
+  return vreinterpret_i32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vreinterpret_v_i16m8_i32m8(vint16m8_t src) {
+  return vreinterpret_i32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16mf2_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i16> [[SRC:%.*]] to <vscale x 1 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vreinterpret_v_u16mf2_u32mf2(vuint16mf2_t src) {
+  return vreinterpret_u32mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vreinterpret_v_u16m1_u32m1(vuint16m1_t src) {
+  return vreinterpret_u32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vreinterpret_v_u16m2_u32m2(vuint16m2_t src) {
+  return vreinterpret_u32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vreinterpret_v_u16m4_u32m4(vuint16m4_t src) {
+  return vreinterpret_u32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vreinterpret_v_u16m8_u32m8(vuint16m8_t src) {
+  return vreinterpret_u32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vreinterpret_v_i16m1_i64m1(vint16m1_t src) {
+  return vreinterpret_i64m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m2_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vreinterpret_v_i16m2_i64m2(vint16m2_t src) {
+  return vreinterpret_i64m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m4_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vreinterpret_v_i16m4_i64m4(vint16m4_t src) {
+  return vreinterpret_i64m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i16m8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vreinterpret_v_i16m8_i64m8(vint16m8_t src) {
+  return vreinterpret_i64m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i16> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vreinterpret_v_u16m1_u64m1(vuint16m1_t src) {
+  return vreinterpret_u64m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m2_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i16> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vreinterpret_v_u16m2_u64m2(vuint16m2_t src) {
+  return vreinterpret_u64m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m4_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i16> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vreinterpret_v_u16m4_u64m4(vuint16m4_t src) {
+  return vreinterpret_u64m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u16m8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 32 x i16> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vreinterpret_v_u16m8_u64m8(vuint16m8_t src) {
+  return vreinterpret_u64m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 4 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vreinterpret_v_i32mf2_i8mf2(vint32mf2_t src) {
+  return vreinterpret_i8mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vreinterpret_v_i32m1_i8m1(vint32m1_t src) {
+  return vreinterpret_i8m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vreinterpret_v_i32m2_i8m2(vint32m2_t src) {
+  return vreinterpret_i8m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vreinterpret_v_i32m4_i8m4(vint32m4_t src) {
+  return vreinterpret_i8m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vreinterpret_v_i32m8_i8m8(vint32m8_t src) {
+  return vreinterpret_i8m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 4 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vreinterpret_v_u32mf2_u8mf2(vuint32mf2_t src) {
+  return vreinterpret_u8mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vreinterpret_v_u32m1_u8m1(vuint32m1_t src) {
+  return vreinterpret_u8m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vreinterpret_v_u32m2_u8m2(vuint32m2_t src) {
+  return vreinterpret_u8m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vreinterpret_v_u32m4_u8m4(vuint32m4_t src) {
+  return vreinterpret_u8m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vreinterpret_v_u32m8_u8m8(vuint32m8_t src) {
+  return vreinterpret_u8m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32mf2_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vreinterpret_v_i32mf2_i16mf2(vint32mf2_t src) {
+  return vreinterpret_i16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vreinterpret_v_i32m1_i16m1(vint32m1_t src) {
+  return vreinterpret_i16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vreinterpret_v_i32m2_i16m2(vint32m2_t src) {
+  return vreinterpret_i16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vreinterpret_v_i32m4_i16m4(vint32m4_t src) {
+  return vreinterpret_i16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vreinterpret_v_i32m8_i16m8(vint32m8_t src) {
+  return vreinterpret_i16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32mf2_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i32> [[SRC:%.*]] to <vscale x 2 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vreinterpret_v_u32mf2_u16mf2(vuint32mf2_t src) {
+  return vreinterpret_u16mf2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vreinterpret_v_u32m1_u16m1(vuint32m1_t src) {
+  return vreinterpret_u16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vreinterpret_v_u32m2_u16m2(vuint32m2_t src) {
+  return vreinterpret_u16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vreinterpret_v_u32m4_u16m4(vuint32m4_t src) {
+  return vreinterpret_u16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vreinterpret_v_u32m8_u16m8(vuint32m8_t src) {
+  return vreinterpret_u16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m1_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vreinterpret_v_i32m1_i64m1(vint32m1_t src) {
+  return vreinterpret_i64m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m2_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vreinterpret_v_i32m2_i64m2(vint32m2_t src) {
+  return vreinterpret_i64m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m4_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vreinterpret_v_i32m4_i64m4(vint32m4_t src) {
+  return vreinterpret_i64m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i32m8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vreinterpret_v_i32m8_i64m8(vint32m8_t src) {
+  return vreinterpret_i64m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m1_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i32> [[SRC:%.*]] to <vscale x 1 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vreinterpret_v_u32m1_u64m1(vuint32m1_t src) {
+  return vreinterpret_u64m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m2_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i32> [[SRC:%.*]] to <vscale x 2 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vreinterpret_v_u32m2_u64m2(vuint32m2_t src) {
+  return vreinterpret_u64m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m4_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i32> [[SRC:%.*]] to <vscale x 4 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vreinterpret_v_u32m4_u64m4(vuint32m4_t src) {
+  return vreinterpret_u64m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u32m8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i32> [[SRC:%.*]] to <vscale x 8 x i64>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vreinterpret_v_u32m8_u64m8(vuint32m8_t src) {
+  return vreinterpret_u64m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vreinterpret_v_i64m1_i8m1(vint64m1_t src) {
+  return vreinterpret_i8m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vreinterpret_v_i64m2_i8m2(vint64m2_t src) {
+  return vreinterpret_i8m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vreinterpret_v_i64m4_i8m4(vint64m4_t src) {
+  return vreinterpret_i8m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vreinterpret_v_i64m8_i8m8(vint64m8_t src) {
+  return vreinterpret_i8m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 8 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vreinterpret_v_u64m1_u8m1(vuint64m1_t src) {
+  return vreinterpret_u8m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 16 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vreinterpret_v_u64m2_u8m2(vuint64m2_t src) {
+  return vreinterpret_u8m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 32 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vreinterpret_v_u64m4_u8m4(vuint64m4_t src) {
+  return vreinterpret_u8m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 64 x i8>
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vreinterpret_v_u64m8_u8m8(vuint64m8_t src) {
+  return vreinterpret_u8m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vreinterpret_v_i64m1_i16m1(vint64m1_t src) {
+  return vreinterpret_i16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vreinterpret_v_i64m2_i16m2(vint64m2_t src) {
+  return vreinterpret_i16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vreinterpret_v_i64m4_i16m4(vint64m4_t src) {
+  return vreinterpret_i16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vreinterpret_v_i64m8_i16m8(vint64m8_t src) {
+  return vreinterpret_i16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 4 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vreinterpret_v_u64m1_u16m1(vuint64m1_t src) {
+  return vreinterpret_u16m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 8 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vreinterpret_v_u64m2_u16m2(vuint64m2_t src) {
+  return vreinterpret_u16m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 16 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vreinterpret_v_u64m4_u16m4(vuint64m4_t src) {
+  return vreinterpret_u16m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 32 x i16>
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vreinterpret_v_u64m8_u16m8(vuint64m8_t src) {
+  return vreinterpret_u16m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m1_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vreinterpret_v_i64m1_i32m1(vint64m1_t src) {
+  return vreinterpret_i32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vreinterpret_v_i64m2_i32m2(vint64m2_t src) {
+  return vreinterpret_i32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m4_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vreinterpret_v_i64m4_i32m4(vint64m4_t src) {
+  return vreinterpret_i32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_i64m8_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vreinterpret_v_i64m8_i32m8(vint64m8_t src) {
+  return vreinterpret_i32m8(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m1_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 1 x i64> [[SRC:%.*]] to <vscale x 2 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vreinterpret_v_u64m1_u32m1(vuint64m1_t src) {
+  return vreinterpret_u32m1(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 2 x i64> [[SRC:%.*]] to <vscale x 4 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vreinterpret_v_u64m2_u32m2(vuint64m2_t src) {
+  return vreinterpret_u32m2(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m4_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 4 x i64> [[SRC:%.*]] to <vscale x 8 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vreinterpret_v_u64m4_u32m4(vuint64m4_t src) {
+  return vreinterpret_u32m4(src);
+}
+
+// CHECK-RV64-LABEL: @test_vreinterpret_v_u64m8_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 8 x i64> [[SRC:%.*]] to <vscale x 16 x i32>
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vreinterpret_v_u64m8_u32m8(vuint64m8_t src) {
+  return vreinterpret_u32m8(src);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c
new file mode 100644
index 0000000000000..5db2ff82f5984
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vset.c
@@ -0,0 +1,546 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
+// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vset_v_i8m1_i8m2(vint8m2_t dest, size_t index, vint8m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vset_v_i8m1_i8m4(vint8m4_t dest, size_t index, vint8m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vset_v_i8m2_i8m4(vint8m4_t dest, size_t index, vint8m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i8m1_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vset_v_i8m1_i8m8(vint8m8_t dest, size_t index, vint8m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i8m2_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vset_v_i8m2_i8m8(vint8m8_t dest, size_t index, vint8m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i8m4_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vset_v_i8m4_i8m8(vint8m8_t dest, size_t index, vint8m4_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.nxv8i8(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vset_v_u8m1_u8m2(vuint8m2_t dest, size_t index, vuint8m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv8i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vset_v_u8m1_u8m4(vuint8m4_t dest, size_t index, vuint8m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vset_v_u8m2_u8m4(vuint8m4_t dest, size_t index, vuint8m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u8m1_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv8i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vset_v_u8m1_u8m8(vuint8m8_t dest, size_t index, vuint8m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u8m2_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vset_v_u8m2_u8m8(vuint8m8_t dest, size_t index, vuint8m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u8m4_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.experimental.vector.insert.nxv64i8.nxv32i8(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vset_v_u8m4_u8m8(vuint8m8_t dest, size_t index, vuint8m4_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vset_v_i16m1_i16m2(vint16m2_t dest, size_t index, vint16m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vset_v_i16m1_i16m4(vint16m4_t dest, size_t index, vint16m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vset_v_i16m2_i16m4(vint16m4_t dest, size_t index, vint16m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i16m1_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vset_v_i16m1_i16m8(vint16m8_t dest, size_t index, vint16m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i16m2_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vset_v_i16m2_i16m8(vint16m8_t dest, size_t index, vint16m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i16m4_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vset_v_i16m4_i16m8(vint16m8_t dest, size_t index, vint16m4_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.experimental.vector.insert.nxv8i16.nxv4i16(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vset_v_u16m1_u16m2(vuint16m2_t dest, size_t index, vuint16m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv4i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vset_v_u16m1_u16m4(vuint16m4_t dest, size_t index, vuint16m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.experimental.vector.insert.nxv16i16.nxv8i16(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vset_v_u16m2_u16m4(vuint16m4_t dest, size_t index, vuint16m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u16m1_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv4i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vset_v_u16m1_u16m8(vuint16m8_t dest, size_t index, vuint16m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u16m2_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv8i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vset_v_u16m2_u16m8(vuint16m8_t dest, size_t index, vuint16m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u16m4_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.experimental.vector.insert.nxv32i16.nxv16i16(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vset_v_u16m4_u16m8(vuint16m8_t dest, size_t index, vuint16m4_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vset_v_i32m1_i32m2(vint32m2_t dest, size_t index, vint32m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vset_v_i32m1_i32m4(vint32m4_t dest, size_t index, vint32m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vset_v_i32m2_i32m4(vint32m4_t dest, size_t index, vint32m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i32m1_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vset_v_i32m1_i32m8(vint32m8_t dest, size_t index, vint32m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i32m2_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vset_v_i32m2_i32m8(vint32m8_t dest, size_t index, vint32m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i32m4_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vset_v_i32m4_i32m8(vint32m8_t dest, size_t index, vint32m4_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.nxv2i32(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vset_v_u32m1_u32m2(vuint32m2_t dest, size_t index, vuint32m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv2i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vset_v_u32m1_u32m4(vuint32m4_t dest, size_t index, vuint32m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.experimental.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vset_v_u32m2_u32m4(vuint32m4_t dest, size_t index, vuint32m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u32m1_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv2i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vset_v_u32m1_u32m8(vuint32m8_t dest, size_t index, vuint32m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u32m2_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv4i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vset_v_u32m2_u32m8(vuint32m8_t dest, size_t index, vuint32m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u32m4_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.experimental.vector.insert.nxv16i32.nxv8i32(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vset_v_u32m4_u32m8(vuint32m8_t dest, size_t index, vuint32m4_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.nxv2f32(<vscale x 4 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vset_v_f32m1_f32m2(vfloat32m2_t dest, size_t index, vfloat32m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv2f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vset_v_f32m1_f32m4(vfloat32m4_t dest, size_t index, vfloat32m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.experimental.vector.insert.nxv8f32.nxv4f32(<vscale x 8 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vset_v_f32m2_f32m4(vfloat32m4_t dest, size_t index, vfloat32m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_f32m1_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv2f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 2 x float> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vset_v_f32m1_f32m8(vfloat32m8_t dest, size_t index, vfloat32m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_f32m2_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv4f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 4 x float> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vset_v_f32m2_f32m8(vfloat32m8_t dest, size_t index, vfloat32m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_f32m4_f32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.experimental.vector.insert.nxv16f32.nxv8f32(<vscale x 16 x float> [[DEST:%.*]], <vscale x 8 x float> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vset_v_f32m4_f32m8(vfloat32m8_t dest, size_t index, vfloat32m4_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vset_v_i64m1_i64m2(vint64m2_t dest, size_t index, vint64m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vset_v_i64m1_i64m4(vint64m4_t dest, size_t index, vint64m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vset_v_i64m2_i64m4(vint64m4_t dest, size_t index, vint64m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i64m1_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vset_v_i64m1_i64m8(vint64m8_t dest, size_t index, vint64m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i64m2_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vset_v_i64m2_i64m8(vint64m8_t dest, size_t index, vint64m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_i64m4_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vset_v_i64m4_i64m8(vint64m8_t dest, size_t index, vint64m4_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.experimental.vector.insert.nxv2i64.nxv1i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vset_v_u64m1_u64m2(vuint64m2_t dest, size_t index, vuint64m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv1i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vset_v_u64m1_u64m4(vuint64m4_t dest, size_t index, vuint64m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.vector.insert.nxv4i64.nxv2i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vset_v_u64m2_u64m4(vuint64m4_t dest, size_t index, vuint64m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u64m1_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv1i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vset_v_u64m1_u64m8(vuint64m8_t dest, size_t index, vuint64m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u64m2_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv2i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vset_v_u64m2_u64m8(vuint64m8_t dest, size_t index, vuint64m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_u64m4_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.experimental.vector.insert.nxv8i64.nxv4i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vset_v_u64m4_u64m8(vuint64m8_t dest, size_t index, vuint64m4_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.nxv1f64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vset_v_f64m1_f64m2(vfloat64m2_t dest, size_t index, vfloat64m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv1f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vset_v_f64m1_f64m4(vfloat64m4_t dest, size_t index, vfloat64m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.experimental.vector.insert.nxv4f64.nxv2f64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vset_v_f64m2_f64m4(vfloat64m4_t dest, size_t index, vfloat64m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_f64m1_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv1f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 1 x double> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vset_v_f64m1_f64m8(vfloat64m8_t dest, size_t index, vfloat64m1_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_f64m2_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv2f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 2 x double> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vset_v_f64m2_f64m8(vfloat64m8_t dest, size_t index, vfloat64m2_t val) {
+  return vset(dest, 0, val);
+}
+
+// CHECK-RV64-LABEL: @test_vset_v_f64m4_f64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.experimental.vector.insert.nxv8f64.nxv4f64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 4 x double> [[VAL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vset_v_f64m4_f64m8(vfloat64m8_t dest, size_t index, vfloat64m4_t val) {
+  return vset(dest, 0, val);
+}

diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
index ea855314baca7..caa3163ed16db 100644
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -170,9 +170,10 @@ class RVVIntrinsic {
 
 public:
   RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName,
-               StringRef IRName, bool HasSideEffects, bool IsMask,
-               bool HasMaskedOffOperand, bool HasVL, bool HasNoMaskedOverloaded,
-               bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types,
+               StringRef MangledSuffix, StringRef IRName, bool HasSideEffects,
+               bool IsMask, bool HasMaskedOffOperand, bool HasVL,
+               bool HasNoMaskedOverloaded, bool HasAutoDef,
+               StringRef ManualCodegen, const RVVTypes &Types,
                const std::vector<int64_t> &IntrinsicTypes,
                StringRef RequiredExtension, unsigned NF);
   ~RVVIntrinsic() = default;
@@ -751,8 +752,8 @@ void RVVType::applyModifier(StringRef Transformer) {
 // RVVIntrinsic implementation
 //===----------------------------------------------------------------------===//
 RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
-                           StringRef NewMangledName, StringRef IRName,
-                           bool HasSideEffects, bool IsMask,
+                           StringRef NewMangledName, StringRef MangledSuffix,
+                           StringRef IRName, bool HasSideEffects, bool IsMask,
                            bool HasMaskedOffOperand, bool HasVL,
                            bool HasNoMaskedOverloaded, bool HasAutoDef,
                            StringRef ManualCodegen, const RVVTypes &OutInTypes,
@@ -771,6 +772,8 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
     MangledName = NewMangledName.str();
   if (!Suffix.empty())
     Name += "_" + Suffix.str();
+  if (!MangledSuffix.empty())
+    MangledName += "_" + MangledSuffix.str();
   if (IsMask) {
     Name += "_m";
   }
@@ -1073,6 +1076,7 @@ void RVVEmitter::createRVVIntrinsics(
     StringRef Name = R->getValueAsString("Name");
     StringRef SuffixProto = R->getValueAsString("Suffix");
     StringRef MangledName = R->getValueAsString("MangledName");
+    StringRef MangledSuffixProto = R->getValueAsString("MangledSuffix");
     StringRef Prototypes = R->getValueAsString("Prototype");
     StringRef TypeRange = R->getValueAsString("TypeRange");
     bool HasMask = R->getValueAsBit("HasMask");
@@ -1147,19 +1151,20 @@ void RVVEmitter::createRVVIntrinsics(
           continue;
 
         auto SuffixStr = getSuffixStr(I, Log2LMUL, SuffixProto);
+        auto MangledSuffixStr = getSuffixStr(I, Log2LMUL, MangledSuffixProto);
         // Create a non-mask intrinsic
         Out.push_back(std::make_unique<RVVIntrinsic>(
-            Name, SuffixStr, MangledName, IRName, HasSideEffects,
-            /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL,
-            HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, Types.getValue(),
-            IntrinsicTypes, RequiredExtension, NF));
+            Name, SuffixStr, MangledName, MangledSuffixStr, IRName,
+            HasSideEffects, /*IsMask=*/false, /*HasMaskedOffOperand=*/false,
+            HasVL, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
+            Types.getValue(), IntrinsicTypes, RequiredExtension, NF));
         if (HasMask) {
           // Create a mask intrinsic
           Optional<RVVTypes> MaskTypes =
               computeTypes(I, Log2LMUL, NF, ProtoMaskSeq);
           Out.push_back(std::make_unique<RVVIntrinsic>(
-              Name, SuffixStr, MangledName, IRNameMask, HasSideEffects,
-              /*IsMask=*/true, HasMaskedOffOperand, HasVL,
+              Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask,
+              HasSideEffects, /*IsMask=*/true, HasMaskedOffOperand, HasVL,
               HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
               MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF));
         }


        

