[clang] f720c22 - [RISCV][Clang] Add RVV Widening Integer Extension intrinsic functions.

Zakk Chen via cfe-commits cfe-commits at lists.llvm.org
Tue Apr 6 03:15:53 PDT 2021


Author: Zakk Chen
Date: 2021-04-06T03:10:14-07:00
New Revision: f720c22e77290187a53fc85cb8d34c313ad07e47

URL: https://github.com/llvm/llvm-project/commit/f720c22e77290187a53fc85cb8d34c313ad07e47
DIFF: https://github.com/llvm/llvm-project/commit/f720c22e77290187a53fc85cb8d34c313ad07e47.diff

LOG: [RISCV][Clang] Add RVV Widening Integer Extension intrinsic functions.

Reviewed By: HsiangKai

Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen at sifive.com>

Differential Revision: https://reviews.llvm.org/D99527
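
For illustration, a minimal usage sketch of the new intrinsics; this is not
part of the patch itself. vsext_vf2/vf4/vf8 sign-extend each element to
2x/4x/8x the source element width, and the vzext_* forms are the
zero-extending counterparts. In the TableGen additions, the prototype
letters "w"/"q"/"o" appear to denote types widened to 2x/4x/8x SEW, with
"U" marking the unsigned variants. The overloaded names resolve on the
argument types, and the masked form follows the usual
(mask, maskedoff, op1, vl) shape, as exercised by the tests below:

#include <riscv_vector.h>

// Widen signed 8-bit elements to 16-bit (SEW 8 -> 16, LMUL 1 -> 2).
vint16m2_t widen_s8(vint8m1_t op1, size_t vl) {
  return vsext_vf2(op1, vl);
}

// Zero-extend unsigned 8-bit elements straight to 32-bit (vf4).
vuint32m4_t widen_u8_to_u32(vuint8m1_t op1, size_t vl) {
  return vzext_vf4(op1, vl);
}

// Masked variant: inactive elements are taken from maskedoff.
vint16m1_t widen_s8_masked(vbool16_t mask, vint16m1_t maskedoff,
                           vint8mf2_t op1, size_t vl) {
  return vsext_vf2(mask, maskedoff, op1, vl);
}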

Added: 
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c

Modified: 
    clang/include/clang/Basic/riscv_vector.td

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 7ddb773506dd..8a6a5fd8e620 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -259,6 +259,14 @@ multiclass RVVFloatingBinBuiltinSet {
                                  ["vf", "v", "vve"]]>;
 }
 
+multiclass RVVIntExt<string intrinsic_name, string suffix, string prototype,
+                     string type_range> {
+  let IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask",
+      MangledName = NAME, IntrinsicTypes = [-1, 0] in {
+    def "" : RVVBuiltin<suffix, prototype, type_range>;
+  }
+}
+
 defvar TypeList = ["c","s","i","l","f","d"];
 defvar EEWList = [["8", "(Log2EEW:3)"],
                   ["16", "(Log2EEW:4)"],
@@ -438,7 +446,18 @@ defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil",
 // TODO
 
 // 12.3. Vector Integer Extension
-// TODO
+let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+    defm vsext_vf2 : RVVIntExt<"vsext", "w", "wv", "csi">;
+    defm vzext_vf2 : RVVIntExt<"vzext", "Uw", "UwUv", "csi">;
+}
+let Log2LMUL = [-3, -2, -1, 0, 1] in {
+    defm vsext_vf4 : RVVIntExt<"vsext", "q", "qv", "cs">;
+    defm vzext_vf4 : RVVIntExt<"vzext", "Uq", "UqUv", "cs">;
+}
+let Log2LMUL = [-3, -2, -1, 0] in {
+    defm vsext_vf8 : RVVIntExt<"vsext", "o", "ov", "c">;
+    defm vzext_vf8 : RVVIntExt<"vzext", "Uo", "UoUv", "c">;
+}
 
 // 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
 // TODO

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
new file mode 100644
index 000000000000..026fdbdf5e40
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsext.c
@@ -0,0 +1,820 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
+  return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
+  return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
+  return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
+  return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
+  return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
+  return vsext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
+  return vsext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
+  return vsext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
+  return vsext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
+  return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
+  return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
+  return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
+  return vsext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
+  return vsext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+                                    vint8mf8_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+                                    vint8mf4_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+                                  vint8mf2_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+                                  vint8m1_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+                                  vint8m2_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+                                  vint8m4_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+                                    vint8mf8_t op1, size_t vl) {
+  return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+                                  vint8mf4_t op1, size_t vl) {
+  return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+                                  vint8mf2_t op1, size_t vl) {
+  return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+                                  vint8m1_t op1, size_t vl) {
+  return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+                                  vint8m2_t op1, size_t vl) {
+  return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+                                  vint8mf8_t op1, size_t vl) {
+  return vsext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+                                  vint8mf4_t op1, size_t vl) {
+  return vsext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+                                  vint8mf2_t op1, size_t vl) {
+  return vsext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+                                  vint8m1_t op1, size_t vl) {
+  return vsext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+                                    vint16mf4_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+                                  vint16mf2_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+                                  vint16m1_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+                                  vint16m2_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+                                  vint16m4_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+                                  vint16mf4_t op1, size_t vl) {
+  return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+                                  vint16mf2_t op1, size_t vl) {
+  return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+                                  vint16m1_t op1, size_t vl) {
+  return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+                                  vint16m2_t op1, size_t vl) {
+  return vsext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+                                  vint32mf2_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+                                  vint32m1_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+                                  vint32m2_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+                                  vint32m4_t op1, size_t vl) {
+  return vsext_vf2(mask, maskedoff, op1, vl);
+}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
new file mode 100644
index 000000000000..2dc9d3d7824d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vzext.c
@@ -0,0 +1,820 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7:#.*]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
+  return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
+  return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
+  return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
+  return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
+  return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
+  return vzext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
+  return vzext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
+  return vzext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
+  return vzext_vf8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
+  return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
+  return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
+  return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
+  return vzext_vf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
+  return vzext_vf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+                                     vuint8mf8_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+                                     vuint8mf4_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+                                   vuint8mf2_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+                                   vuint8m1_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+                                   vuint8m2_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+                                   vuint8m4_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+                                     vuint8mf8_t op1, size_t vl) {
+  return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+                                   vuint8mf4_t op1, size_t vl) {
+  return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+                                   vuint8mf2_t op1, size_t vl) {
+  return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+                                   vuint8m1_t op1, size_t vl) {
+  return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+                                   vuint8m2_t op1, size_t vl) {
+  return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+                                   vuint8mf8_t op1, size_t vl) {
+  return vzext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+                                   vuint8mf4_t op1, size_t vl) {
+  return vzext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+                                   vuint8mf2_t op1, size_t vl) {
+  return vzext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+                                   vuint8m1_t op1, size_t vl) {
+  return vzext_vf8(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+                                     vuint16mf4_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+                                   vuint16mf2_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+                                   vuint16m1_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+                                   vuint16m2_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+                                   vuint16m4_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+                                   vuint16mf4_t op1, size_t vl) {
+  return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+                                   vuint16mf2_t op1, size_t vl) {
+  return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+                                   vuint16m1_t op1, size_t vl) {
+  return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+                                   vuint16m2_t op1, size_t vl) {
+  return vzext_vf4(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+                                   vuint32mf2_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+                                   vuint32m1_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+                                   vuint32m2_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]]) [[ATTR7]]
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+                                   vuint32m4_t op1, size_t vl) {
+  return vzext_vf2(mask, maskedoff, op1, vl);
+}

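The vzext tests above exercise the overloaded spellings (vzext_vf2/vzext_vf4/vzext_vf8, plus the _m forms that prepend a mask and maskedoff operand), where the header infers the result type from the operand types. The vsext file below instead tests the explicitly-typed spellings, which encode the destination element type and LMUL in the name (e.g. vsext_vf2_i16m2). As a minimal sketch of how the two spellings relate in user code, assuming the v0.10-era vsetvl/load/store intrinsics (vsetvl_e8m1, vle8_v_i8m1, vse16_v_i16m2) that riscv_vector.h provides but which are not part of this patch:

#include <riscv_vector.h>

// Sign-extend a byte buffer to 16-bit elements, one strip-mined chunk at a time.
void widen_i8_to_i16(const int8_t *src, int16_t *dst, size_t n) {
  while (n > 0) {
    size_t vl = vsetvl_e8m1(n);             // elements handled this iteration (assumed API)
    vint8m1_t v = vle8_v_i8m1(src, vl);     // SEW=8, LMUL=1 load (assumed API)
    vint16m2_t w = vsext_vf2_i16m2(v, vl);  // explicit spelling, tested below
    // vint16m2_t w = vsext_vf2(v, vl);     // overloaded spelling, mirroring the vzext tests above
    // masked form: vsext_vf2_i16m2_m(mask, maskedoff, v, vl);
    vse16_v_i16m2(dst, w, vl);              // SEW=16, LMUL=2 store (assumed API)
    src += vl; dst += vl; n -= vl;
  }
}

Note how the destination register group doubles for vf2 (m1 source, m2 result), quadruples for vf4, and so on; the tests pin this down for every legal source LMUL, including the fractional ones (mf8/mf4/mf2).
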
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
new file mode 100644
index 000000000000..c2634603e65f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsext.c
@@ -0,0 +1,820 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
+  return vsext_vf2_i16mf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
+  return vsext_vf2_i16mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
+  return vsext_vf2_i16m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
+  return vsext_vf2_i16m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
+  return vsext_vf2_i16m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
+  return vsext_vf2_i16m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
+  return vsext_vf4_i32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
+  return vsext_vf4_i32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
+  return vsext_vf4_i32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
+  return vsext_vf4_i32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
+  return vsext_vf4_i32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
+  return vsext_vf8_i64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
+  return vsext_vf8_i64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
+  return vsext_vf8_i64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
+  return vsext_vf8_i64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
+  return vsext_vf2_i32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
+  return vsext_vf2_i32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
+  return vsext_vf2_i32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
+  return vsext_vf2_i32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
+  return vsext_vf2_i32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
+  return vsext_vf4_i64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
+  return vsext_vf4_i64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
+  return vsext_vf4_i64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
+  return vsext_vf4_i64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
+  return vsext_vf2_i64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
+  return vsext_vf2_i64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
+  return vsext_vf2_i64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
+  return vsext_vf2_i64m8(op1, vl);
+}
+
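+// Illustrative note (not part of the autogenerated checks): the _m
+// variants below take (mask, maskedoff, op1, vl); active lanes are
+// sign-extended from op1, while lanes whose mask bit is clear are taken
+// from maskedoff, matching the maskedoff-first operand order in the IR.
+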
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vsext_vf2_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
+                                    vint8mf8_t op1, size_t vl) {
+  return vsext_vf2_i16mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vsext_vf2_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
+                                    vint8mf4_t op1, size_t vl) {
+  return vsext_vf2_i16mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vsext_vf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
+                                  vint8mf2_t op1, size_t vl) {
+  return vsext_vf2_i16m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vsext_vf2_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
+                                  vint8m1_t op1, size_t vl) {
+  return vsext_vf2_i16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vsext_vf2_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
+                                  vint8m2_t op1, size_t vl) {
+  return vsext_vf2_i16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vsext_vf2_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
+                                  vint8m4_t op1, size_t vl) {
+  return vsext_vf2_i16m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf4_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+                                    vint8mf8_t op1, size_t vl) {
+  return vsext_vf4_i32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf4_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+                                  vint8mf4_t op1, size_t vl) {
+  return vsext_vf4_i32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf4_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+                                  vint8mf2_t op1, size_t vl) {
+  return vsext_vf4_i32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf4_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+                                  vint8m1_t op1, size_t vl) {
+  return vsext_vf4_i32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf4_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+                                  vint8m2_t op1, size_t vl) {
+  return vsext_vf4_i32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf8_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+                                  vint8mf8_t op1, size_t vl) {
+  return vsext_vf8_i64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf8_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+                                  vint8mf4_t op1, size_t vl) {
+  return vsext_vf8_i64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf8_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+                                  vint8mf2_t op1, size_t vl) {
+  return vsext_vf8_i64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf8_i64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf8_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+                                  vint8m1_t op1, size_t vl) {
+  return vsext_vf8_i64m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vsext_vf2_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
+                                    vint16mf4_t op1, size_t vl) {
+  return vsext_vf2_i32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vsext_vf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
+                                  vint16mf2_t op1, size_t vl) {
+  return vsext_vf2_i32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vsext_vf2_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
+                                  vint16m1_t op1, size_t vl) {
+  return vsext_vf2_i32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vsext_vf2_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
+                                  vint16m2_t op1, size_t vl) {
+  return vsext_vf2_i32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vsext_vf2_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
+                                  vint16m4_t op1, size_t vl) {
+  return vsext_vf2_i32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf4_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+                                  vint16mf4_t op1, size_t vl) {
+  return vsext_vf4_i64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf4_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+                                  vint16mf2_t op1, size_t vl) {
+  return vsext_vf4_i64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf4_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+                                  vint16m1_t op1, size_t vl) {
+  return vsext_vf4_i64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf4_i64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf4_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+                                  vint16m2_t op1, size_t vl) {
+  return vsext_vf4_i64m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vsext_vf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
+                                  vint32mf2_t op1, size_t vl) {
+  return vsext_vf2_i64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vsext_vf2_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
+                                  vint32m1_t op1, size_t vl) {
+  return vsext_vf2_i64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vsext_vf2_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
+                                  vint32m2_t op1, size_t vl) {
+  return vsext_vf2_i64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vsext_vf2_i64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vsext_vf2_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
+                                  vint32m4_t op1, size_t vl) {
+  return vsext_vf2_i64m8_m(mask, maskedoff, op1, vl);
+}

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
new file mode 100644
index 000000000000..5fd4609715f9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vzext.c
@@ -0,0 +1,820 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +m -target-feature +experimental-v -Werror -Wall -o - %s -S >/dev/null 2>&1 | FileCheck --check-prefix=ASM --allow-empty %s
+
+// ASM-NOT: warning
+#include <riscv_vector.h>
+
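+// Illustrative note (not part of the autogenerated checks): in
+// vzext_vfN_uXmY, N is the ratio of destination to source element width
+// (vf2 doubles EEW, vf4 quadruples it, vf8 octuples it), so the source
+// LMUL is 1/N of the destination LMUL and the element count is unchanged,
+// e.g. vuint8mf8_t widens to vuint16mf4_t under vf2.
+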
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
+  return vzext_vf2_u16mf4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
+  return vzext_vf2_u16mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
+  return vzext_vf2_u16m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
+  return vzext_vf2_u16m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
+  return vzext_vf2_u16m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i32(<vscale x 32 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
+  return vzext_vf2_u16m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
+  return vzext_vf4_u32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
+  return vzext_vf4_u32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
+  return vzext_vf4_u32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
+  return vzext_vf4_u32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i32(<vscale x 16 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
+  return vzext_vf4_u32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i32(<vscale x 1 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
+  return vzext_vf8_u64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i32(<vscale x 2 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
+  return vzext_vf8_u64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i32(<vscale x 4 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
+  return vzext_vf8_u64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i32(<vscale x 8 x i8> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
+  return vzext_vf8_u64m8(op1, vl);
+}
+
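+// Minimal usage sketch (illustrative, not a test; assumes the
+// vsetvl_e8mf8 and vle8_v_u8mf8 intrinsics of the same spec version and
+// hypothetical locals n and src):
+//   size_t vl = vsetvl_e8mf8(n);             // vl for EEW=8, LMUL=1/8
+//   vuint8mf8_t v = vle8_v_u8mf8(src, vl);   // load vl bytes
+//   vuint64m1_t w = vzext_vf8_u64m1(v, vl);  // zero-extend bytes to 64 bits
+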
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32mf2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
+  return vzext_vf2_u32mf2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
+  return vzext_vf2_u32m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
+  return vzext_vf2_u32m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
+  return vzext_vf2_u32m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i32(<vscale x 16 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
+  return vzext_vf2_u32m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i32(<vscale x 1 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
+  return vzext_vf4_u64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i32(<vscale x 2 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
+  return vzext_vf4_u64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i32(<vscale x 4 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
+  return vzext_vf4_u64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i32(<vscale x 8 x i16> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
+  return vzext_vf4_u64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m1(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i32(<vscale x 1 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
+  return vzext_vf2_u64m1(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m2(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i32(<vscale x 2 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
+  return vzext_vf2_u64m2(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m4(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i32(<vscale x 4 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
+  return vzext_vf2_u64m4(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m8(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i32(<vscale x 8 x i32> [[OP1:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
+  return vzext_vf2_u64m8(op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i32(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vzext_vf2_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
+                                     vuint8mf8_t op1, size_t vl) {
+  return vzext_vf2_u16mf4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i32(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzext_vf2_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
+                                     vuint8mf4_t op1, size_t vl) {
+  return vzext_vf2_u16mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i32(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzext_vf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
+                                   vuint8mf2_t op1, size_t vl) {
+  return vzext_vf2_u16m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i32(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzext_vf2_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
+                                   vuint8m1_t op1, size_t vl) {
+  return vzext_vf2_u16m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i32(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzext_vf2_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
+                                   vuint8m2_t op1, size_t vl) {
+  return vzext_vf2_u16m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u16m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i32(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzext_vf2_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
+                                   vuint8m4_t op1, size_t vl) {
+  return vzext_vf2_u16m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf4_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+                                     vuint8mf8_t op1, size_t vl) {
+  return vzext_vf4_u32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf4_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+                                   vuint8mf4_t op1, size_t vl) {
+  return vzext_vf4_u32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf4_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+                                   vuint8mf2_t op1, size_t vl) {
+  return vzext_vf4_u32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf4_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+                                   vuint8m1_t op1, size_t vl) {
+  return vzext_vf4_u32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf4_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+                                   vuint8m2_t op1, size_t vl) {
+  return vzext_vf4_u32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf8_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+                                   vuint8mf8_t op1, size_t vl) {
+  return vzext_vf8_u64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf8_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+                                   vuint8mf4_t op1, size_t vl) {
+  return vzext_vf8_u64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf8_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+                                   vuint8mf2_t op1, size_t vl) {
+  return vzext_vf8_u64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf8_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf8_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+                                   vuint8m1_t op1, size_t vl) {
+  return vzext_vf8_u64m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32mf2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i32(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vzext_vf2_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
+                                     vuint16mf4_t op1, size_t vl) {
+  return vzext_vf2_u32mf2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i32(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzext_vf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
+                                   vuint16mf2_t op1, size_t vl) {
+  return vzext_vf2_u32m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i32(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzext_vf2_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
+                                   vuint16m1_t op1, size_t vl) {
+  return vzext_vf2_u32m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i32(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzext_vf2_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
+                                   vuint16m2_t op1, size_t vl) {
+  return vzext_vf2_u32m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u32m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i32(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzext_vf2_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
+                                   vuint16m4_t op1, size_t vl) {
+  return vzext_vf2_u32m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf4_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+                                   vuint16mf4_t op1, size_t vl) {
+  return vzext_vf4_u64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf4_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+                                   vuint16mf2_t op1, size_t vl) {
+  return vzext_vf4_u64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf4_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+                                   vuint16m1_t op1, size_t vl) {
+  return vzext_vf4_u64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf4_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf4_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+                                   vuint16m2_t op1, size_t vl) {
+  return vzext_vf4_u64m8_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m1_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i32(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vzext_vf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
+                                   vuint32mf2_t op1, size_t vl) {
+  return vzext_vf2_u64m1_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m2_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i32(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzext_vf2_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
+                                   vuint32m1_t op1, size_t vl) {
+  return vzext_vf2_u64m2_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m4_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i32(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzext_vf2_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
+                                   vuint32m2_t op1, size_t vl) {
+  return vzext_vf2_u64m4_m(mask, maskedoff, op1, vl);
+}
+
+// CHECK-RV32-LABEL: @test_vzext_vf2_u64m8_m(
+// CHECK-RV32-NEXT:  entry:
+// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i32(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i32 [[VL:%.*]])
+// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzext_vf2_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
+                                   vuint32m4_t op1, size_t vl) {
+  return vzext_vf2_u64m8_m(mask, maskedoff, op1, vl);
+}
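
For context, here is a minimal sketch of how the newly added masked
vzext intrinsics could be used from C. This is illustrative only: it
assumes a compiler carrying this patch plus the <riscv_vector.h>
header, and that the unsigned compare intrinsic vmsgtu_vx_u8mf4_b32 is
also available in the toolchain; the helper name and threshold are
made up. The vzext_vf4_u32m1_m call itself follows exactly the
(mask, maskedoff, op1, vl) signature exercised by the tests above.

#include <riscv_vector.h>
#include <stddef.h>

// Zero-extend unsigned 8-bit samples to 32 bits, but only where the
// sample exceeds a threshold; inactive elements keep the value already
// present in `acc` (the maskedoff operand).
vuint32m1_t widen_above_threshold(vuint32m1_t acc, vuint8mf4_t samples,
                                  size_t vl) {
  // vuint8mf4_t has SEW/LMUL = 8/(1/4) = 32, so its mask type is
  // vbool32_t, matching the mask parameter of vzext_vf4_u32m1_m.
  // (Assumed intrinsic: vmsgtu_vx_u8mf4_b32.)
  vbool32_t mask = vmsgtu_vx_u8mf4_b32(samples, 16, vl);
  // vf4: each active 8-bit element is zero-extended to 32 bits.
  return vzext_vf4_u32m1_m(mask, acc, samples, vl);
}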
